Merge branch '17521-dot-slash-glob' refs #17521
author Peter Amstutz <peter.amstutz@curii.com>
Fri, 23 Apr 2021 21:02:41 +0000 (17:02 -0400)
committer Peter Amstutz <peter.amstutz@curii.com>
Fri, 23 Apr 2021 21:02:41 +0000 (17:02 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz@curii.com>

107 files changed:
apps/workbench/bin/yarn [deleted file]
apps/workbench/config/application.rb
apps/workbench/config/arvados_config.rb
build/rails-package-scripts/README.md
build/rails-package-scripts/postinst.sh
build/rails-package-scripts/step2.sh
build/run-build-packages.sh
doc/admin/upgrading.html.textile.liquid
doc/install/salt-multi-host.html.textile.liquid
doc/install/salt-single-host.html.textile.liquid
doc/install/salt.html.textile.liquid
lib/cloud/ec2/ec2.go
lib/cloud/ec2/ec2_test.go
lib/config/config.default.yml
lib/config/export.go
lib/config/generated_config.go
lib/controller/auth_test.go
lib/controller/federation/federation_test.go
lib/controller/federation_test.go
lib/controller/handler.go
lib/controller/handler_test.go
lib/controller/integration_test.go
lib/controller/proxy.go
lib/controller/router/request.go
lib/controller/router/router.go
lib/controller/router/router_test.go
lib/controller/server_test.go
lib/dispatchcloud/driver.go
sdk/go/arvados/collection.go
sdk/go/arvados/collection_test.go [new file with mode: 0644]
sdk/go/arvados/config.go
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadosclient/arvadosclient_test.go
sdk/go/keepclient/discover.go
sdk/python/arvados/api.py
sdk/python/tests/nginx.conf
sdk/python/tests/test_api.py
services/api/bin/yarn [deleted file]
services/api/config/arvados_config.rb
services/api/config/initializers/legacy_jobs_api.rb
services/keep-web/handler.go
services/keep-web/handler_test.go
tools/salt-install/.gitignore [new file with mode: 0644]
tools/salt-install/README.md
tools/salt-install/Vagrantfile
tools/salt-install/config_examples/multi_host/aws/README.md [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/certs/README.md [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/docker.sls [moved from tools/salt-install/single_host/docker.sls with 100% similarity]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/locale.sls [moved from tools/salt-install/single_host/locale.sls with 100% similarity]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls [new file with mode: 0644]
tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/README.md [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls [moved from tools/salt-install/single_host/arvados.sls with 67% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/docker.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/locale.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_api_configuration.sls [moved from tools/salt-install/single_host/nginx_api_configuration.sls with 100% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls [moved from tools/salt-install/single_host/nginx_controller_configuration.sls with 97% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls [moved from tools/salt-install/single_host/nginx_keepproxy_configuration.sls with 97% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls [moved from tools/salt-install/single_host/nginx_keepweb_configuration.sls with 97% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls [moved from tools/salt-install/single_host/nginx_passenger.sls with 100% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls [moved from tools/salt-install/single_host/nginx_webshell_configuration.sls with 98% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls [moved from tools/salt-install/single_host/nginx_websocket_configuration.sls with 97% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls [moved from tools/salt-install/single_host/nginx_workbench2_configuration.sls with 93% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls [moved from tools/salt-install/single_host/nginx_workbench_configuration.sls with 97% similarity]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/README.md [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/docker.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/locale.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_api_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_controller_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepproxy_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepweb_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_webshell_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_websocket_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench2_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench_configuration.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls [moved from tools/salt-install/single_host/postgresql.sls with 78% similarity]
tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls [new file with mode: 0644]
tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls [new file with mode: 0644]
tools/salt-install/local.params.example.multiple_hosts [new file with mode: 0644]
tools/salt-install/local.params.example.single_host_multiple_hostnames [new file with mode: 0644]
tools/salt-install/local.params.example.single_host_single_hostname [new file with mode: 0644]
tools/salt-install/provision.sh
tools/salt-install/tests/run-test.sh
tools/terraform/.gitignore [new file with mode: 0644]

diff --git a/apps/workbench/bin/yarn b/apps/workbench/bin/yarn
deleted file mode 100755 (executable)
index 5fc7611..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env ruby
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-APP_ROOT = File.expand_path('..', __dir__)
-Dir.chdir(APP_ROOT) do
-  begin
-    exec "yarnpkg #{ARGV.join(" ")}"
-  rescue Errno::ENOENT
-    $stderr.puts "Yarn executable was not detected in the system."
-    $stderr.puts "Download Yarn at https://yarnpkg.com/en/docs/install"
-    exit 1
-  end
-end
index 42bf4da24bbf71900d403686cc954badd57660e0..2880af2d6d3c2db113977420498c20a25bffd90b 100644 (file)
@@ -27,6 +27,8 @@ end
 
 module ArvadosWorkbench
   class Application < Rails::Application
+    # The following is to avoid SafeYAML's warning message
+    SafeYAML::OPTIONS[:default_mode] = :safe
 
     require_relative "arvados_config.rb"
 
index 007e8d0687c6b290633ca809b2bf4396ca657a2d..c5cc544b9b8717fc70b51c52c1c4b35ce03a16aa 100644 (file)
@@ -31,19 +31,25 @@ clusterID, clusterConfig = confs["Clusters"].first
 $arvados_config_defaults = clusterConfig
 $arvados_config_defaults["ClusterID"] = clusterID
 
-# Load the global config file
-Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
-  confs = YAML.load(stdout, deserialize_symbols: false)
-  if confs && !confs.empty?
-    # config-dump merges defaults with user configuration, so every
-    # key should be set.
-    clusterID, clusterConfig = confs["Clusters"].first
-    $arvados_config_global = clusterConfig
-    $arvados_config_global["ClusterID"] = clusterID
-  else
-    # config-dump failed, assume we will be loading from legacy
-    # application.yml, initialize with defaults.
-    $arvados_config_global = $arvados_config_defaults.deep_dup
+if ENV["ARVADOS_CONFIG"] == "none"
+  # Don't load config. This magic value is set by packaging scripts so
+  # they can run "rake assets:precompile" without a real config.
+  $arvados_config_global = $arvados_config_defaults.deep_dup
+else
+  # Load the global config file
+  Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
+    confs = YAML.load(stdout, deserialize_symbols: false)
+    if confs && !confs.empty?
+      # config-dump merges defaults with user configuration, so every
+      # key should be set.
+      clusterID, clusterConfig = confs["Clusters"].first
+      $arvados_config_global = clusterConfig
+      $arvados_config_global["ClusterID"] = clusterID
+    else
+      # config-dump failed, assume we will be loading from legacy
+      # application.yml, initialize with defaults.
+      $arvados_config_global = $arvados_config_defaults.deep_dup
+    end
   end
 end
 
@@ -189,7 +195,8 @@ ArvadosWorkbench::Application.configure do
   ConfigLoader.copy_into_config $arvados_config, config
   ConfigLoader.copy_into_config $remaining_config, config
   secrets.secret_key_base = $arvados_config["Workbench"]["SecretKeyBase"]
-  ConfigValidators.validate_wb2_url_config()
-  ConfigValidators.validate_download_config()
-
+  if ENV["ARVADOS_CONFIG"] != "none"
+    ConfigValidators.validate_wb2_url_config()
+    ConfigValidators.validate_download_config()
+  end
 end
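
For reference, a minimal shell sketch of the bypass this enables, mirroring the run-build-packages.sh change further down (that script's invocation is authoritative; this sketch is illustrative only):

    # Precompile Rails assets at package-build time with no cluster
    # config present: arvados_config.rb sees ARVADOS_CONFIG=none, falls
    # back to the compiled-in defaults, and skips the ConfigValidators
    # checks above.
    ARVADOS_CONFIG=none RAILS_ENV=production RAILS_GROUPS=assets \
        bundle exec rake assets:precompile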
index 35549d9cd3b8673c0ed13fbf23386bdab6798014..6ac2539f8e68e7c229cf7c1e4859674605c3c1d4 100644 (file)
@@ -15,4 +15,3 @@ postinst.sh lets the early parts define a few hooks to control behavior:
 * After it installs the core configuration files (database.yml, application.yml, and production.rb) to /etc/arvados/server, it calls setup_extra_conffiles.  By default this is a noop function (in step2.sh).
 * Before it restarts nginx, it calls setup_before_nginx_restart.  By default this is a noop function (in step2.sh).  API server defines this to set up the internal git repository, if necessary.
 * $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database.  API server uses db:structure:load.  Workbench doesn't set this, which causes the postinst to skip all database work.
-* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.
index 3eb2d2c5e0c2f58d2099f131c0b3ecd0d8e3078b..bcd7a27c85125c729fb304ef7a2e70d105950044 100644 (file)
@@ -226,19 +226,15 @@ configure_version() {
       prepare_database
   fi
 
-  if [ 11 = "$RAILSPKG_SUPPORTS_CONFIG_CHECK$APPLICATION_READY" ]; then
+  if [ -e /etc/arvados/config.yml ]; then
+      # warn about config errors (deprecated/removed keys from
+      # previous version, etc)
       run_and_report "Checking configuration for completeness" \
-          $COMMAND_PREFIX bundle exec rake config:check || APPLICATION_READY=0
-  fi
-
-  # precompile assets; thankfully this does not take long
-  if [ "$APPLICATION_READY" = "1" ]; then
-      run_and_report "Precompiling assets" \
-          $COMMAND_PREFIX bundle exec rake assets:precompile -q -s 2>/dev/null \
-          || APPLICATION_READY=0
+                     $COMMAND_PREFIX bundle exec rake config:check || APPLICATION_READY=0
   else
-      echo "Precompiling assets... skipped."
+      APPLICATION_READY=0
   fi
+
   chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp
 
   setup_before_nginx_restart
index 482d27a6a07502b0fe03d1f14019fb6767088918..41c9cd71e366ff5d14973638ad48547bcc98ba3a 100644 (file)
@@ -11,7 +11,7 @@ if [ -z "$INSTALL_PATH" ]; then
 
 PACKAGE BUILD ERROR: $0 is missing package metadata.
 
-This package is buggy.  Please mail <support@curoverse.com> to let
+This package is buggy.  Please mail <packaging@arvados.org> to let
 us know the name and version number of the package you tried to
 install, and we'll get it fixed.
 
@@ -23,7 +23,6 @@ RELEASE_PATH=$INSTALL_PATH/current
 RELEASE_CONFIG_PATH=$RELEASE_PATH/config
 SHARED_PATH=$INSTALL_PATH/shared
 
-RAILSPKG_SUPPORTS_CONFIG_CHECK=${RAILSPKG_SUPPORTS_CONFIG_CHECK:-1}
 if ! type setup_extra_conffiles >/dev/null 2>&1; then
     setup_extra_conffiles() { return; }
 fi
index f9db56a5ff56d69ed5e3095b386eaaf6883ac5a4..e231a83df8d22e291a7d048bc8fa3d00c136a109 100755 (executable)
@@ -401,8 +401,8 @@ if [[ "$?" == "0" ]] ; then
       mv /tmp/x /etc/arvados/config.yml
       perl -p -i -e 'BEGIN{undef $/;} s/WebDAV(.*?):\n( *)ExternalURL: ""/WebDAV$1:\n$2ExternalURL: "example.com"/g' /etc/arvados/config.yml
 
-      RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake npm:install >"$STDOUT_IF_DEBUG"
-      RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake assets:precompile >"$STDOUT_IF_DEBUG"
+      ARVADOS_CONFIG=none RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake npm:install >"$STDOUT_IF_DEBUG"
+      ARVADOS_CONFIG=none RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake assets:precompile >"$STDOUT_IF_DEBUG"
 
       # Remove generated configuration files so they don't go in the package.
       rm -rf /etc/arvados/
index 03e5992d15e958d26beaf3885d0da114ddb2979d..52b71287b2c27af49537c9461255a1c6d6bac52f 100644 (file)
@@ -62,6 +62,10 @@ h3. Centos7 Python 3 dependency upgraded to python3
 
 Now that Python 3 is part of the base repository in CentOS 7, the Python 3 dependency for Centos7 Arvados packages was changed from SCL rh-python36 to python3.
 
+h3. ForceLegacyAPI14 option removed
+
+The ForceLegacyAPI14 configuration option has been removed. In the unlikely event it is mentioned in your config file, remove it to avoid "deprecated/unknown config" warning logs.
+
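+In other words, if your cluster config contains a stanza like the following (cluster ID @zzzzz@ used here as a placeholder), delete the @ForceLegacyAPI14@ line:
+
+<notextile>
+<pre><code>Clusters:
+  zzzzz:
+    ForceLegacyAPI14: false
+</code></pre>
+</notextile>
+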
 h2(#v2_1_0). v2.1.0 (2020-10-13)
 
 "Upgrading from 2.0.0":#v2_0_0
index 4ba153faf9e1e116eda7671e5c87ae72380630fd..827d65db28d4ad99089f42d47a2e07d94cf941ef 100644 (file)
@@ -9,91 +9,163 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-# "Install Saltstack":#saltstack
-# "Install dependencies":#dependencies
-# "Install Arvados using Saltstack":#saltstack
-# "DNS configuration":#final_steps
+# "Hosts preparation":#hosts_preparation
+## "Hosts setup using terraform (experimental)":#hosts_setup_using_terraform
+## "Create a compute image":#create_a_compute_image
+# "Multi host install using the provision.sh script":#multi_host
+# "Choose the desired configuration":#choose_configuration
+## "Multiple hosts / multiple hostnames":#multi_host_multi_hostnames
+## "Further customization of the installation (modifying the salt pillars and states)":#further_customization
+# "Installation order":#installation_order
+# "Run the provision.sh script":#run_provision_script
 # "Initial user and login":#initial_user
+# "Test the installed cluster running a simple workflow":#test_install
 
-h2(#saltstack). Install Saltstack
+h2(#hosts_preparation). Hosts preparation
 
-If you already have a Saltstack environment you can skip this section.
+In order to run Arvados in a multi-host installation, there are a few requirements that your infrastructure has to fulfill.
 
-The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+These instructions explain how to set up a multi-host environment that is suitable for production use of Arvados.
 
+We suggest distributing the Arvados components in the following way, creating at least 6 hosts:
+
+# Database server:
+## postgresql server
+# API node:
+## arvados api server
+## arvados controller
+## arvados websocket
+## arvados cloud dispatcher
+# WORKBENCH node:
+## arvados workbench
+## arvados workbench2
+# KEEPPROXY node:
+## arvados keepproxy
+## arvados keepweb
+# KEEPSTOREs (at least 2)
+## arvados keepstore
+# SHELL node (optional):
+## arvados shell
+
+Note that these hosts can be virtual machines in your infrastructure; they don't need to be physical machines.
+
+h3(#hosts_setup_using_terraform). Hosts setup using terraform (experimental)
+
+We added a few "terraform":https://terraform.io/ scripts (https://github.com/arvados/arvados/tree/master/tools/terraform) to make it easier to create these instances.
+Check "the Arvados terraform documentation":/doc/install/terraform.html for more details.
+
+h2(#multi_host). Multi host install using the provision.sh script
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+This procedure will install all the main Arvados components to get you up and running in a multi-host environment.
+
+We suggest you use the @provision.sh@ script to deploy Arvados, which is implemented with the @arvados-formula@ in a Saltstack master-less setup. After setting up a few variables in a config file (next step), you'll be ready to run it and get Arvados deployed.
+
+h3(#create_a_compute_image). Create a compute image
+
+In a multi-host installation, containers are dispatched to docker daemons running on the <i>compute instances</i>, which need some special setup. We provide a "compute image builder script":https://github.com/arvados/arvados/tree/master/tools/compute-images that you can use to build a template image following "these instructions":https://doc.arvados.org/main/install/crunch2-cloud/install-compute-node.html . Once you have built that image, you can use its reference in the Arvados configuration in the next steps.
+
+h2(#choose_configuration). Choose the desired configuration
+
+For documentation's sake, we will use the cluster name <i>arva2</i> and the domain <i>arv.local</i>. If you don't change them as required in the next steps, installation won't proceed.
+
+We aim to provide a few example configurations for an Arvados multi-host installation on different infrastructure providers. Currently only AWS is available, but the examples can be adapted to almost any provider with few changes.
+
+You need to copy one of the example configuration files and the matching configuration directory, and edit them to suit your needs.
+
+h3(#multi_host_multi_hostnames). Multiple hosts / multiple hostnames
 <notextile>
-<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
-sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+<pre><code>cp local.params.example.multiple_hosts local.params
+cp -r config_examples/multi_host/aws local_config_dir
 </code></pre>
 </notextile>
 
-For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+Edit the variables in the <i>local.params</i> file. Pay attention to the <b>*_INT_IP, *_TOKEN</b> and <b>*KEY</b> variables. Those variables are used in a search and replace on the <i>pillars/*</i> files, substituting every matching __VARIABLE__ placeholder with the value you set.
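+
+For example, a hypothetical entry in @local.params@ such as:
+
+<notextile>
+<pre><code># local.params (variable name and value are illustrative)
+DATABASE_INT_IP=10.0.0.5
+</code></pre>
+</notextile>
+
+would replace every occurrence of __DATABASE_INT_IP__ in the pillar files with @10.0.0.5@ before the salt run.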
 
-h2(#dependencies). Install dependencies
+The <i>multi_host</i> examples include LetsEncrypt salt code to automatically request and install certificates for the public-facing hosts (API, Workbench), so the hostnames need to be reachable from the Internet. If that is not the case for your cluster, set the variable <i>USE_LETSENCRYPT=no</i>.
 
-Arvados depends in a few applications and packages (postgresql, nginx+passenger, ruby) that can also be installed using their respective Saltstack formulas.
+h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
 
-The formulas we use are:
+You will need further customization to suit your environment, which can be done by editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that can be retrieved from the output of the terraform run.
 
-* "postgres":https://github.com/saltstack-formulas/postgres-formula.git
-* "nginx":https://github.com/saltstack-formulas/nginx-formula.git
-* "docker":https://github.com/saltstack-formulas/docker-formula.git
-* "locale":https://github.com/saltstack-formulas/locale-formula.git
+Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the hosts.
 
-There are example Salt pillar files for each of those formulas in the "arvados-formula's test/salt/pillar/examples":https://github.com/saltstack-formulas/arvados-formula/tree/master/test/salt/pillar/examples directory. As they are, they allow you to get all the main Arvados components up and running.
+h2(#installation_order). Installation order
 
-h2(#saltstack). Install Arvados using Saltstack
-
-This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+A few Arvados nodes need to be installed in a certain order. The required order is:
 
-The Arvados formula we maintain is located in the Saltstack's community repository of formulas:
+#. Database
+#. API server
+#. The other nodes can be installed in any order after the two above
 
-* "arvados-formula":https://github.com/saltstack-formulas/arvados-formula.git
+h2(#run_provision_script). Run the provision.sh script
 
-The @development@ version lives in our own repository
+When you have finished customizing the configuration, you are ready to copy the files to the hosts and run the @provision.sh@ script. The script allows you to specify the <i>role(s)</i> a node will have, and it will install only the Arvados components required for those roles. The general format of the command is:
 
-* "arvados-formula development":https://github.com/arvados/arvados-formula.git
-
-This last one might break from time to time, as we try and add new features. Use with caution.
+<notextile>
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --roles comma,separated,list,of,roles,to,apply
+</code></pre>
+</notextile>
 
-As much as possible, we try to keep it up to date, with example pillars to help you deploy Arvados.
+and wait for it to finish.
 
-For those familiar with Saltstack, the process to get it deployed is similar to any other formula:
+If everything goes OK, you'll get some final lines like:
 
-1. Fork/copy the formula to your Salt master host.
-2. Edit the Arvados, nginx, postgres, locale and docker pillars to match your desired configuration.
-3. Run a @state.apply@ to get it deployed.
+<notextile>
+<pre><code>arvados: Succeeded: 109 (changed=9)
+arvados: Failed:      0
+</code></pre>
+</notextile>
 
-h2(#final_steps). DNS configuration
+The distribution of roles described above can be applied by running these commands:
 
-After the setup is done, you need to set up your DNS to be able to access the cluster's nodes.
+#. Database
+<notextile>
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles database
+</code></pre>
+</notextile>
 
-The simplest way to do this is to add entries in the @/etc/hosts@ file of every host:
+#. API
+<notextile>
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles api,controller,websocket,dispatcher
+</code></pre>
+</notextile>
 
+#. Keepstore/s
 <notextile>
-<pre><code>export CLUSTER="arva2"
-export DOMAIN="arv.local"
-
-echo A.B.C.a  api ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.b  keep keep.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.c  keep0 keep0.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.d  collections collections.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.e  download download.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.f  ws ws.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.g  workbench workbench.${CLUSTER}.${DOMAIN} >> /etc/hosts
-echo A.B.C.h  workbench2 workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles keepstore
 </code></pre>
 </notextile>
 
-Replacing in each case de @A.B.C.x@ IP with the corresponding IP of the node.
+#. Workbench
+<notextile>
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles workbench,workbench2
+</code></pre>
+</notextile>
 
-If your infrastructure uses another DNS service setup, add the corresponding entries accordingly.
+#. Keepproxy / Keepweb
+<notextile>
+<pre><code>scp -r provision.sh local* user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles keepproxy,keepweb
+</code></pre>
+</notextile>
 
-h2(#initial_user). Initial user and login
+#. Shell (here we copy the CLI test workflow too)
+<notextile>
+<pre><code>scp -r provision.sh local* tests user@host:
+ssh user@host sudo ./provision.sh --config local.params --roles shell
+</code></pre>
+</notextile>
 
-At this point you should be able to log into the Arvados cluster.
+h2(#initial_user). Initial user and login
 
-If you did not change the defaults, the initial URL will be:
+At this point you should be able to log into the Arvados cluster. The initial URL will be:
 
 * https://workbench.arva2.arv.local
 
@@ -103,8 +175,100 @@ or, in general, the url format will be:
 
 By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
 
-Assuming you didn't change the defaults, the initial credentials are:
+Assuming you didn't change these values in the @local.params@ file, the initial credentials are:
 
 * User: 'admin'
 * Password: 'password'
 * Email: 'admin@arva2.arv.local'
+
+h2(#test_install). Test the installed cluster running a simple workflow
+
+If you followed the instructions above, the @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory on the @shell@ node. If you want to run it, just ssh to the node, change to that directory and run:
+
+<notextile>
+<pre><code>cd /tmp/cluster_tests
+sudo ./run-test.sh
+</code></pre>
+</notextile>
+
+It will create a test user (by default, the same one as the admin user), upload a small workflow and run it. If everything goes OK, the output should be similar to this (some output was shortened for clarity):
+
+<notextile>
+<pre><code>Creating Arvados Standard Docker Images project
+Arvados project uuid is 'arva2-j7d0g-0prd8cjlk6kfl7y'
+{
+ ...
+ "uuid":"arva2-o0j2j-n4zu4cak5iifq2a",
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+}
+Uploading arvados/jobs' docker image to the project
+2.1.1: Pulling from arvados/jobs
+8559a31e96f4: Pulling fs layer
+...
+Status: Downloaded newer image for arvados/jobs:2.1.1
+docker.io/arvados/jobs:2.1.1
+2020-11-23 21:43:39 arvados.arv_put[32678] INFO: Creating new cache file at /home/vagrant/.cache/arvados/arv-put/c59256eda1829281424c80f588c7cc4d
+2020-11-23 21:43:46 arvados.arv_put[32678] INFO: Collection saved as 'Docker image arvados jobs:2.1.1 sha256:0dd50'
+arva2-4zz18-1u5pvbld7cvxuy2
+Creating initial user ('admin')
+Setting up user ('admin')
+{
+ "items":[
+  {
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "uuid":"arva2-o0j2j-1ownrdne0ok9iox"
+  },
+  {
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "uuid":"arva2-o0j2j-1zbeyhcwxc1tvb7"
+  },
+  {
+   ...
+   "email":"admin@arva2.arv.local",
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "username":"admin",
+   "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+   ...
+  }
+ ],
+ "kind":"arvados#HashList"
+}
+Activating user 'admin'
+{
+ ...
+ "email":"admin@arva2.arv.local",
+ ...
+ "username":"admin",
+ "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+ ...
+}
+Running test CWL workflow
+INFO /usr/bin/cwl-runner 2.1.1, arvados-python-client 2.1.1, cwltool 3.0.20200807132242
+INFO Resolved 'hasher-workflow.cwl' to 'file:///tmp/cluster_tests/hasher-workflow.cwl'
+...
+INFO Using cluster arva2 (https://arva2.arv.local:8443/)
+INFO Upload local files: "test.txt"
+INFO Uploaded to ea34d971b71d5536b4f6b7d6c69dc7f6+50 (arva2-4zz18-c8uvwqdry4r8jao)
+INFO Using collection cache size 256 MiB
+INFO [container hasher-workflow.cwl] submitted container_request arva2-xvhdp-v1bkywd58gyocwm
+INFO [container hasher-workflow.cwl] arva2-xvhdp-v1bkywd58gyocwm is Final
+INFO Overall process status is success
+INFO Final output collection d6c69a88147dde9d52a418d50ef788df+123
+{
+    "hasher_out": {
+        "basename": "hasher3.md5sum.txt",
+        "class": "File",
+        "location": "keep:d6c69a88147dde9d52a418d50ef788df+123/hasher3.md5sum.txt",
+        "size": 95
+    }
+}
+INFO Final process status is success
+</code></pre>
+</notextile>
index 48b26e83aa4f3a6af5d6863aea2d9fffc3853a6a..f2a8ee5704dc08625a541678e2a660ee440a1714 100644 (file)
@@ -9,67 +9,94 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-# "Install Saltstack":#saltstack
 # "Single host install using the provision.sh script":#single_host
-# "Final steps":#final_steps
-## "DNS configuration":#dns_configuration
-## "Install root certificate":#ca_root_certificate
+# "Choose the desired configuration":#choose_configuration
+## "Single host / single hostname":#single_host_single_hostnames
+## "Single host / multiple hostnames (Alternative configuration)":#single_host_multiple_hostnames
+## "Further customization of the installation (modifying the salt pillars and states)":#further_customization
+# "Run the provision.sh script":#run_provision_script
+# "Final configuration steps":#final_steps
+## "Install the CA root certificate (required in both alternatives)":#ca_root_certificate
+## "DNS configuration (single host / multiple hostnames)":#single_host_multiple_hostnames_dns_configuration
 # "Initial user and login":#initial_user
 # "Test the installed cluster running a simple workflow":#test_install
 
-h2(#saltstack). Install Saltstack
+h2(#single_host). Single host install using the provision.sh script
+
+<b>NOTE: The single host installation is not recommended for production use.</b>
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+This procedure will install all the main Arvados components to get you up and running on a single host. The whole installation procedure takes somewhere between 15 and 60 minutes, depending on the host's resources and network bandwidth. As a reference, on a virtual machine with 1 core and 1 GB RAM, it takes ~25 minutes to do the initial install.
+
+We suggest you use the @provision.sh@ script to deploy Arvados, which is implemented with the @arvados-formula@ in a Saltstack master-less setup. After setting up a few variables in a config file (next step), you'll be ready to run it and get Arvados deployed.
 
-If you already have a Saltstack environment you can skip this section.
+h2(#choose_configuration). Choose the desired configuration
 
-The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+For documentation's sake, we will use the cluster name <i>arva2</i> and the domain <i>arv.local</i>. If you don't change them as required in the next steps, installation won't proceed.
 
+Arvados' single host installation can be done in two fashions:
+
+* Using a single hostname and assigning <i>a different port (other than 443) to each user-facing service</i>: this choice is easier to set up, but users will need to know the port(s) for the different services they want to connect to (see the example below).
+* Using multiple hostnames on the same IP: this setup involves a few extra steps, but each service gets a meaningful hostname, making the services easier to access later.
+
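+For example, with the single hostname configuration the controller might end up on a port like 8443 (a port that also appears in the test workflow output at the end of this page), so clients would use @https://arva2.arv.local:8443/@; with multiple hostnames, each service is reachable by name, e.g. @https://workbench.arva2.arv.local@.
+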
+Once you decide which of these choices you prefer, copy one of the two example configuration files and the matching configuration directory, and edit them to suit your needs.
+
+h3(#single_host_single_hostnames). Single host / single hostname
 <notextile>
-<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
-sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+<pre><code>cp local.params.example.single_host_single_hostname local.params
+cp -r config_examples/single_host/single_hostname local_config_dir
 </code></pre>
 </notextile>
 
-For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+Edit the variables in the <i>local.params</i> file. Pay attention to the <b>*_PORT, *_TOKEN</b> and <b>*KEY</b> variables.
 
-h2(#single_host). Single host install using the provision.sh script
+h3(#single_host_multiple_hostnames). Single host / multiple hostnames (Alternative configuration)
+<notextile>
+<pre><code>cp local.params.example.single_host_multiple_hostnames local.params
+cp -r config_examples/single_host/multiple_hostnames local_config_dir
+</code></pre>
+</notextile>
 
-This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+Edit the variables in the <i>local.params</i> file.
 
-Use the @provision.sh@ script to deploy Arvados, which is implemented with the @arvados-formula@ in a Saltstack master-less setup:
+h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
 
-* edit the variables at the very beginning of the file,
-* run the script as root
-* wait for it to finish
+If you want or need further customization, you can edit the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file. Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the host.
 
-This will install all the main Arvados components to get you up and running. The whole installation procedure takes somewhere between 15 to 60 minutes, depending on the host and your network bandwidth. On a virtual machine with 1 core and 1 GB RAM, it takes ~25 minutes to do the initial install.
+h2(#run_provision_script). Run the provision.sh script
 
-If everything goes OK, you'll get some final lines stating something like:
+When you have finished customizing the configuration, you are ready to copy the files to the host (if needed) and run the @provision.sh@ script:
 
 <notextile>
-<pre><code>arvados: Succeeded: 109 (changed=9)
-arvados: Failed:      0
+<pre><code>scp -r provision.sh local* tests user@host:
+ssh user@host sudo ./provision.sh
 </code></pre>
 </notextile>
 
-h2(#final_steps). Final configuration steps
+or, if you saved @local.params@ in another directory or under another name:
 
-h3(#dns_configuration). DNS configuration
+<notextile>
+<pre><code>scp -r provision.sh local* tests user@host:
+ssh user@host sudo ./provision.sh -c /path/to/your/local.params.file
+</code></pre>
+</notextile>
 
-After the setup is done, you need to set up your DNS to be able to access the cluster.
+and wait for it to finish.
 
-The simplest way to do this is to edit your @/etc/hosts@ file (as root):
+If everything goes OK, you'll get some final lines like:
 
 <notextile>
-<pre><code>export CLUSTER="arva2"
-export DOMAIN="arv.local"
-export HOST_IP="127.0.0.2"    # This is valid either if installing in your computer directly
-                              # or in a Vagrant VM. If you're installing it on a remote host
-                              # just change the IP to match that of the host.
-echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+<pre><code>arvados: Succeeded: 109 (changed=9)
+arvados: Failed:      0
 </code></pre>
 </notextile>
 
-h3(#ca_root_certificate). Install root certificate
+h2(#final_steps). Final configuration steps
+
+Once the deployment has finished successfully, you'll need to perform a few extra steps on your local browser/host to access the cluster.
+
+h3(#ca_root_certificate). Install the CA root certificate (required in both alternatives)
 
 Arvados uses SSL to encrypt communications. Its UI uses AJAX, which will silently fail if the certificate is not valid or is signed by an unknown Certificate Authority.
 
@@ -102,11 +129,25 @@ To access your Arvados instance using command line clients (such as arv-get and
 </code></pre>
 </notextile>
 
-h2(#initial_user). Initial user and login
+h3(#single_host_multiple_hostnames_dns_configuration). DNS configuration (single host / multiple hostnames)
+
+When using multiple hostnames, after the setup is done, you need to set up your DNS to be able to access the cluster.
+
+If you don't have access to the domain's DNS to add the required entries, the simplest way to do it is to edit your @/etc/hosts@ file (as root):
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+export HOST_IP="127.0.0.2"    # This is valid either if installing in your computer directly
+                              # or in a Vagrant VM. If you're installing it on a remote host
+                              # just change the IP to match that of the host.
+echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+</code></pre>
+</notextile>
 
-At this point you should be able to log into the Arvados cluster.
+h2(#initial_user). Initial user and login
 
-If you changed nothing in the @provision.sh@ script, the initial URL will be:
+At this point you should be able to log into the Arvados cluster. The initial URL will be:
 
 * https://workbench.arva2.arv.local
 
@@ -116,7 +157,7 @@ or, in general, the url format will be:
 
 By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
 
-Assuming you didn't change these values in the @provision.sh@ script, the initial credentials are:
+Assuming you didn't change these values in the @local.params@ file, the initial credentials are:
 
 * User: 'admin'
 * Password: 'password'
@@ -124,15 +165,15 @@ Assuming you didn't change these values in the @provision.sh@ script, the initia
 
 h2(#test_install). Test the installed cluster running a simple workflow
 
-The @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@. If you want to run it, just change to that directory and run:
+The @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory on the node. If you want to run it, just ssh to the node, change to that directory and run:
 
 <notextile>
 <pre><code>cd /tmp/cluster_tests
-./run-test.sh
+sudo ./run-test.sh
 </code></pre>
 </notextile>
 
-It will create a test user, upload a small workflow and run it. If everything goes OK, the output should similar to this (some output was shortened for clarity):
+It will create a test user (by default, the same one as the admin user), upload a small workflow and run it. If everything goes OK, the output should be similar to this (some output was shortened for clarity):
 
 <notextile>
 <pre><code>Creating Arvados Standard Docker Images project
index 8f5ecc8c650f8a9f6d82f7ca8c3f6e256e2cb200..a9ee08fb886d0747ff5ffda161f323996d664165 100644 (file)
@@ -10,20 +10,35 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 # "Introduction":#introduction
-# "Choose an installation method":#installmethod
+# "Install Saltstack":#saltstack
+# "Choose an Arvados installation configuration":#installconfiguration
 
 h2(#introduction). Introduction
 
-To ease the installation of the various Arvados components, we have developed a "Saltstack":https://www.saltstack.com/ 's "arvados-formula":https://github.com/saltstack-formulas/arvados-formula which can help you get an Arvados cluster up and running.
+To ease the installation of the various Arvados components, we have developed "arvados-formula":https://github.com/arvados/arvados-formula.git, a "Saltstack":https://www.saltstack.com/ formula which can help you get an Arvados cluster up and running.
 
 Saltstack is Python-based, open-source software for event-driven IT automation, remote task execution, and configuration management. It can be used in a master/minion setup or master-less.
 
-This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+This is a package-based installation method. The Salt scripts to install and configure Arvados using this formula are available at the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
 
-h2(#installmethod). Choose an installation method
+h2(#saltstack). Install Saltstack
 
-The salt formulas can be used in different ways. Choose one of these three options to install Arvados:
+If you already have a Saltstack environment or you plan to use the @provision.sh@ script we provide, you can skip this section.
+
+The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+
+<notextile>
+<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+</code></pre>
+</notextile>
+
+For more information, check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+
+h2(#installconfiguration). Choose an Arvados installation configuration
+
+The salt formula can be used in a few different ways. Choose one of these three options to install Arvados:
 
-* "Use Vagrant to install Arvados in a virtual machine":salt-vagrant.html
 * "Arvados on a single host":salt-single-host.html
 * "Arvados across multiple hosts":salt-multi-host.html
+* "Use Vagrant to install Arvados in a virtual machine":salt-vagrant.html
index 1e0de74024f52851ebe4eb08c0414617d0bdc7db..071c95006c9b305b1f47737bbb6eab588961785c 100644 (file)
@@ -14,6 +14,8 @@ import (
        "fmt"
        "math/big"
        "sync"
+       "sync/atomic"
+       "time"
 
        "git.arvados.org/arvados.git/lib/cloud"
        "git.arvados.org/arvados.git/sdk/go/arvados"
@@ -21,6 +23,7 @@ import (
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ec2"
        "github.com/sirupsen/logrus"
@@ -30,6 +33,11 @@ import (
 // Driver is the ec2 implementation of the cloud.Driver interface.
 var Driver = cloud.DriverFunc(newEC2InstanceSet)
 
+const (
+       throttleDelayMin = time.Second
+       throttleDelayMax = time.Minute
+)
+
 type ec2InstanceSetConfig struct {
        AccessKeyID      string
        SecretAccessKey  string
@@ -50,12 +58,14 @@ type ec2Interface interface {
 }
 
 type ec2InstanceSet struct {
-       ec2config     ec2InstanceSetConfig
-       instanceSetID cloud.InstanceSetID
-       logger        logrus.FieldLogger
-       client        ec2Interface
-       keysMtx       sync.Mutex
-       keys          map[string]string
+       ec2config              ec2InstanceSetConfig
+       instanceSetID          cloud.InstanceSetID
+       logger                 logrus.FieldLogger
+       client                 ec2Interface
+       keysMtx                sync.Mutex
+       keys                   map[string]string
+       throttleDelayCreate    atomic.Value
+       throttleDelayInstances atomic.Value
 }
 
 func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
@@ -220,7 +230,7 @@ func (instanceSet *ec2InstanceSet) Create(
        }
 
        rsv, err := instanceSet.client.RunInstances(&rii)
-
+       err = wrapError(err, &instanceSet.throttleDelayCreate)
        if err != nil {
                return nil, err
        }
@@ -242,6 +252,7 @@ func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances
        dii := &ec2.DescribeInstancesInput{Filters: filters}
        for {
                dio, err := instanceSet.client.DescribeInstances(dii)
+               err = wrapError(err, &instanceSet.throttleDelayInstances)
                if err != nil {
                        return nil, err
                }
@@ -328,3 +339,33 @@ func (inst *ec2Instance) RemoteUser() string {
 func (inst *ec2Instance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
        return cloud.ErrNotImplemented
 }
+
+type rateLimitError struct {
+       error
+       earliestRetry time.Time
+}
+
+func (err rateLimitError) EarliestRetry() time.Time {
+       return err.earliestRetry
+}
+
+func wrapError(err error, throttleValue *atomic.Value) error {
+       if request.IsErrorThrottle(err) {
+               // Back off exponentially until an upstream call
+               // either succeeds or returns a non-throttle error.
+               d, _ := throttleValue.Load().(time.Duration)
+               d = d*3/2 + time.Second
+               if d < throttleDelayMin {
+                       d = throttleDelayMin
+               } else if d > throttleDelayMax {
+                       d = throttleDelayMax
+               }
+               throttleValue.Store(d)
+               return rateLimitError{error: err, earliestRetry: time.Now().Add(d)}
+       } else if err != nil {
+               throttleValue.Store(time.Duration(0))
+               return err
+       }
+       throttleValue.Store(time.Duration(0))
+       return nil
+}
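
The two atomic.Value fields on ec2InstanceSet hold the current backoff for the Create and Instances paths, and callers learn about a throttled call through the EarliestRetry method (the cloud.RateLimitError assertion added in the test file below pins that contract). A self-contained, illustrative Go sketch of how a caller can honor it; this is not Arvados' actual dispatcher code:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // throttledError is an illustrative stand-in for the rateLimitError
    // wrapper above: an error that also reports the earliest time at
    // which a retry may be attempted.
    type throttledError struct {
        error
        earliestRetry time.Time
    }

    func (err throttledError) EarliestRetry() time.Time { return err.earliestRetry }

    func main() {
        err := error(throttledError{
            error:         errors.New("Throttling: Rate exceeded"),
            earliestRetry: time.Now().Add(2 * time.Second),
        })
        // Detect the rate-limit case by asserting on the EarliestRetry
        // method, then wait until the indicated time before retrying
        // the cloud API call.
        if rle, ok := err.(interface{ EarliestRetry() time.Time }); ok {
            time.Sleep(time.Until(rle.EarliestRetry()))
        }
        fmt.Println("safe to retry")
    }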
index 6aa6e857ff59b278aa3a54292ab461527502d84b..e7319a0cb66d7fe2e0132c9092f88ab5d5714a28 100644 (file)
@@ -245,3 +245,5 @@ func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
                c.Check(i.Destroy(), check.IsNil)
        }
 }
+
+var TestRateLimitErrorInterface cloud.RateLimitError = rateLimitError{}
index bcaa692ff48cc10f5a2631b84b8fc0ed361a8436..ca627d07e8147a7becead4322855d0eee508b266 100644 (file)
@@ -571,9 +571,6 @@ Clusters:
         # ID > Web application) and add your controller's /login URL
         # (e.g., "https://zzzzz.example.com/login") as an authorized
         # redirect URL.
-        #
-        # Incompatible with ForceLegacyAPI14. ProviderAppID must be
-        # blank.
         ClientID: ""
         ClientSecret: ""
 
@@ -1481,13 +1478,6 @@ Clusters:
       # this blank.
       SSHHelpHostSuffix: ""
 
-    # Bypass new (Arvados 1.5) API implementations, and hand off
-    # requests directly to Rails instead. This can provide a temporary
-    # workaround for clients that are incompatible with the new API
-    # implementation. Note that it also disables some new federation
-    # features and will be removed in a future release.
-    ForceLegacyAPI14: false
-
 # (Experimental) Restart services automatically when config file
 # changes are detected. Only supported by `arvados-server boot` in
 # dev/test mode.
index b6531c59d87dd329a24d0b43f22dfd738f9208d5..5c0e9f270071b81792179c525cb47fa567955104 100644 (file)
@@ -130,7 +130,6 @@ var whitelist = map[string]bool{
        "Containers.SupportedDockerImageFormats":              true,
        "Containers.SupportedDockerImageFormats.*":            true,
        "Containers.UsePreemptibleInstances":                  true,
-       "ForceLegacyAPI14":                                    false,
        "Git":                                                 false,
        "InstanceTypes":                                       true,
        "InstanceTypes.*":                                     true,
index 4787f4fab2475e7870b85ce6597a3dc215347134..1032663973991d2a3f6cc786e1818e5ff28192c0 100644 (file)
@@ -577,9 +577,6 @@ Clusters:
         # ID > Web application) and add your controller's /login URL
         # (e.g., "https://zzzzz.example.com/login") as an authorized
         # redirect URL.
-        #
-        # Incompatible with ForceLegacyAPI14. ProviderAppID must be
-        # blank.
         ClientID: ""
         ClientSecret: ""
 
@@ -1487,13 +1484,6 @@ Clusters:
       # this blank.
       SSHHelpHostSuffix: ""
 
-    # Bypass new (Arvados 1.5) API implementations, and hand off
-    # requests directly to Rails instead. This can provide a temporary
-    # workaround for clients that are incompatible with the new API
-    # implementation. Note that it also disables some new federation
-    # features and will be removed in a future release.
-    ForceLegacyAPI14: false
-
 # (Experimental) Restart services automatically when config file
 # changes are detected. Only supported by ` + "`" + `arvados-server boot` + "`" + ` in
 # dev/test mode.
index ad214b160591928892e75c0f6e1e805d4c9e3506..01990620f6094dd10063df7e5e9410e082cade36 100644 (file)
@@ -62,10 +62,9 @@ func (s *AuthSuite) SetUpTest(c *check.C) {
        s.fakeProvider.ValidClientSecret = "test#client/secret"
 
        cluster := &arvados.Cluster{
-               ClusterID:        "zhome",
-               PostgreSQL:       integrationTestCluster().PostgreSQL,
-               ForceLegacyAPI14: forceLegacyAPI14,
-               SystemRootToken:  arvadostest.SystemRootToken,
+               ClusterID:       "zhome",
+               PostgreSQL:      integrationTestCluster().PostgreSQL,
+               SystemRootToken: arvadostest.SystemRootToken,
        }
        cluster.TLS.Insecure = true
        cluster.API.MaxItemsPerResponse = 1000
index 50f7eea42b3fe162de4282f009d31f162df5cd4f..fdc4d96cfaa90b3e28dd2048c2c5bd0f73bf9dc5 100644 (file)
@@ -86,7 +86,7 @@ func (s *FederationSuite) addDirectRemote(c *check.C, id string, backend backend
 
 func (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend backend) {
        srv := httpserver.Server{Addr: ":"}
-       srv.Handler = router.New(backend, nil)
+       srv.Handler = router.New(backend, router.Config{})
        c.Check(srv.Start(), check.IsNil)
        s.cluster.RemoteClusters[id] = arvados.RemoteCluster{
                Scheme: "http",
index e3b2291bcef4481ff37159c2d3c8b79b744b03e6..f4cadd821b3d008d7ad8cdf68a402d9e7bd05092 100644 (file)
@@ -57,9 +57,8 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
        c.Assert(s.remoteMock.Start(), check.IsNil)
 
        cluster := &arvados.Cluster{
-               ClusterID:        "zhome",
-               PostgreSQL:       integrationTestCluster().PostgreSQL,
-               ForceLegacyAPI14: forceLegacyAPI14,
+               ClusterID:  "zhome",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
        }
        cluster.TLS.Insecure = true
        cluster.API.MaxItemsPerResponse = 1000
index 40f335e952d6d40820b5ac52d6c2b5ea92d9946c..a35d0030194e8bf9e79d1f2f256ff9fab5621fe7 100644 (file)
@@ -92,23 +92,23 @@ func (h *Handler) setup() {
        })
 
        oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
-       rtr := router.New(federation.New(h.Cluster), api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls))
+       rtr := router.New(federation.New(h.Cluster), router.Config{
+               MaxRequestSize: h.Cluster.API.MaxRequestSize,
+               WrapCalls:      api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
+       })
        mux.Handle("/arvados/v1/config", rtr)
-       mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr)
-
-       if !h.Cluster.ForceLegacyAPI14 {
-               mux.Handle("/arvados/v1/collections", rtr)
-               mux.Handle("/arvados/v1/collections/", rtr)
-               mux.Handle("/arvados/v1/users", rtr)
-               mux.Handle("/arvados/v1/users/", rtr)
-               mux.Handle("/arvados/v1/connect/", rtr)
-               mux.Handle("/arvados/v1/container_requests", rtr)
-               mux.Handle("/arvados/v1/container_requests/", rtr)
-               mux.Handle("/arvados/v1/groups", rtr)
-               mux.Handle("/arvados/v1/groups/", rtr)
-               mux.Handle("/login", rtr)
-               mux.Handle("/logout", rtr)
-       }
+       mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr) // must come before .../users/
+       mux.Handle("/arvados/v1/collections", rtr)
+       mux.Handle("/arvados/v1/collections/", rtr)
+       mux.Handle("/arvados/v1/users", rtr)
+       mux.Handle("/arvados/v1/users/", rtr)
+       mux.Handle("/arvados/v1/connect/", rtr)
+       mux.Handle("/arvados/v1/container_requests", rtr)
+       mux.Handle("/arvados/v1/container_requests/", rtr)
+       mux.Handle("/arvados/v1/groups", rtr)
+       mux.Handle("/arvados/v1/groups/", rtr)
+       mux.Handle("/login", rtr)
+       mux.Handle("/logout", rtr)
 
        hs := http.NotFoundHandler()
        hs = prepend(hs, h.proxyRailsAPI)
index d12e4fa33d32a72d8f9b5342c94aab84664fbe03..935208fc4e621c7c8040e09a1381e39c949e0232 100644 (file)
@@ -26,13 +26,9 @@ import (
        check "gopkg.in/check.v1"
 )
 
-var forceLegacyAPI14 bool
-
 // Gocheck boilerplate
 func Test(t *testing.T) {
-       for _, forceLegacyAPI14 = range []bool{false, true} {
-               check.TestingT(t)
-       }
+       check.TestingT(t)
 }
 
 var _ = check.Suite(&HandlerSuite{})
@@ -48,9 +44,8 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
        s.ctx, s.cancel = context.WithCancel(context.Background())
        s.ctx = ctxlog.Context(s.ctx, ctxlog.New(os.Stderr, "json", "debug"))
        s.cluster = &arvados.Cluster{
-               ClusterID:        "zzzzz",
-               PostgreSQL:       integrationTestCluster().PostgreSQL,
-               ForceLegacyAPI14: forceLegacyAPI14,
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
        }
        s.cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
        s.cluster.TLS.Insecure = true
@@ -198,10 +193,6 @@ func (s *HandlerSuite) TestLogoutSSO(c *check.C) {
 }
 
 func (s *HandlerSuite) TestLogoutGoogle(c *check.C) {
-       if s.cluster.ForceLegacyAPI14 {
-               // Google login N/A
-               return
-       }
        s.cluster.Login.Google.Enable = true
        s.cluster.Login.Google.ClientID = "test"
        req := httptest.NewRequest("GET", "https://0.0.0.0:1/logout?return_to=https://example.com/foo", nil)
index db1f7f0d0cee51b359c6d10f9ddd1e1b5de790e3..aeaede427ecf6c5106d86a890304418d5d8cb4c7 100644 (file)
@@ -37,11 +37,6 @@ type IntegrationSuite struct {
 }
 
 func (s *IntegrationSuite) SetUpSuite(c *check.C) {
-       if forceLegacyAPI14 {
-               c.Skip("heavy integration tests don't run with forceLegacyAPI14")
-               return
-       }
-
        cwd, _ := os.Getwd()
 
        s.oidcprovider = arvadostest.NewOIDCProvider(c)
index d7381860ea422299406e0a38e726f6d09bb38481..13dfcac16abb0bb27c7b1f3d50d024436453f97c 100644 (file)
@@ -42,6 +42,9 @@ var dropHeaders = map[string]bool{
        "Accept-Encoding":   true,
        "Content-Encoding":  true,
        "Transfer-Encoding": true,
+
+       // Content-Length depends on encoding.
+       "Content-Length": true,
 }
 
 type ResponseFilter func(*http.Response, error) (*http.Response, error)
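
The new entry matters because the proxy may change the body's encoding in transit, which makes any upstream Content-Length wrong. A minimal standalone sketch of the same header-filtering idea (the helper names here are hypothetical, not part of this commit):

```go
package main

import (
	"fmt"
	"net/http"
)

// Headers that must not be copied verbatim when proxying, because the
// proxied body may be re-encoded (so Content-Length would be wrong).
var dropHeaders = map[string]bool{
	"Connection":        true,
	"Accept-Encoding":   true,
	"Content-Encoding":  true,
	"Transfer-Encoding": true,
	"Content-Length":    true,
}

// copyHeaders forwards all headers except the encoding-dependent ones.
func copyHeaders(dst, src http.Header) {
	for k, vv := range src {
		if dropHeaders[k] {
			continue
		}
		for _, v := range vv {
			dst.Add(k, v)
		}
	}
}

func main() {
	src := http.Header{
		"Content-Length": {"42"},
		"Content-Type":   {"text/plain"},
	}
	dst := http.Header{}
	copyHeaders(dst, src)
	fmt.Println(dst) // map[Content-Type:[text/plain]]
}
```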
index eae9e0a8cebc974dca813bba3dfa8f03381bbf2e..06141b1033e3f0034e003eab07da11c17153496e 100644 (file)
@@ -63,7 +63,11 @@ func guessAndParse(k, v string) (interface{}, error) {
 func (rtr *router) loadRequestParams(req *http.Request, attrsKey string) (map[string]interface{}, error) {
        err := req.ParseForm()
        if err != nil {
-               return nil, httpError(http.StatusBadRequest, err)
+               if err.Error() == "http: request body too large" {
+                       return nil, httpError(http.StatusRequestEntityTooLarge, err)
+               } else {
+                       return nil, httpError(http.StatusBadRequest, err)
+               }
        }
        params := map[string]interface{}{}
 
index a313ebc8bed94c5b5b7e32b6c086644b4faae77f..5ceabbfb1d56fab171d8d4a8dfabca585f1362f6 100644 (file)
@@ -7,6 +7,7 @@ package router
 import (
        "context"
        "fmt"
+       "math"
        "net/http"
        "strings"
 
@@ -20,24 +21,32 @@ import (
 )
 
 type router struct {
-       mux       *mux.Router
-       backend   arvados.API
-       wrapCalls func(api.RoutableFunc) api.RoutableFunc
+       mux     *mux.Router
+       backend arvados.API
+       config  Config
+}
+
+type Config struct {
+       // Return an error if request body exceeds this size. 0 means
+       // unlimited.
+       MaxRequestSize int
+
+       // If wrapCalls is not nil, it is called once for each API
+       // method, and the returned method is used in its place. This
+       // can be used to install hooks before and after each API call
+       // and alter responses; see localdb.WrapCallsInTransaction for
+       // an example.
+       WrapCalls func(api.RoutableFunc) api.RoutableFunc
 }
 
 // New returns a new router (which implements the http.Handler
 // interface) that serves requests by calling Arvados API methods on
 // the given backend.
-//
-// If wrapCalls is not nil, it is called once for each API method, and
-// the returned method is used in its place. This can be used to
-// install hooks before and after each API call and alter responses;
-// see localdb.WrapCallsInTransaction for an example.
-func New(backend arvados.API, wrapCalls func(api.RoutableFunc) api.RoutableFunc) *router {
+func New(backend arvados.API, config Config) *router {
        rtr := &router{
-               mux:       mux.NewRouter(),
-               backend:   backend,
-               wrapCalls: wrapCalls,
+               mux:     mux.NewRouter(),
+               backend: backend,
+               config:  config,
        }
        rtr.addRoutes()
        return rtr
@@ -433,8 +442,8 @@ func (rtr *router) addRoutes() {
                },
        } {
                exec := route.exec
-               if rtr.wrapCalls != nil {
-                       exec = rtr.wrapCalls(exec)
+               if rtr.config.WrapCalls != nil {
+                       exec = rtr.config.WrapCalls(exec)
                }
                rtr.addRoute(route.endpoint, route.defaultOpts, exec)
        }
@@ -524,8 +533,26 @@ func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        if r.Method == "OPTIONS" {
                return
        }
+       if r.Body != nil {
+               // Wrap r.Body in a http.MaxBytesReader(), otherwise
+               // r.ParseForm() uses a default max request body size
+               // of 10 megabytes. Note we rely on the Nginx
+               // configuration to enforce the real max body size.
+               max := int64(rtr.config.MaxRequestSize)
+               if max < 1 {
+                       max = math.MaxInt64 - 1
+               }
+               r.Body = http.MaxBytesReader(w, r.Body, max)
+       }
        if r.Method == "POST" {
-               r.ParseForm()
+               err := r.ParseForm()
+               if err != nil {
+                       if err.Error() == "http: request body too large" {
+                               err = httpError(http.StatusRequestEntityTooLarge, err)
+                       }
+                       rtr.sendError(w, err)
+                       return
+               }
                if m := r.FormValue("_method"); m != "" {
                        r2 := *r
                        r = &r2
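
For context on this hunk: Go's ParseForm reads the whole request body, and when the body is not already wrapped it silently applies its own 10 MB cap. A minimal standalone sketch (not part of this commit) of the MaxBytesReader/ParseForm interaction the router relies on:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Cap the request body at 16 bytes, the way the router caps it
		// at rtr.config.MaxRequestSize.
		r.Body = http.MaxBytesReader(w, r.Body, 16)
		if err := r.ParseForm(); err != nil {
			// err.Error() is "http: request body too large",
			// the string the router checks for.
			w.WriteHeader(http.StatusRequestEntityTooLarge)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	body := url.Values{"foo": {strings.Repeat("x", 64)}}.Encode()
	req := httptest.NewRequest("POST", "/", strings.NewReader(body))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	fmt.Println(rr.Code) // 413
}
```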
index 18fff7c9cc4f5d4a10f347c3da55ab79ca1bf38d..0330ec4252c9ad3ee8f461faf9ce7508c17bd3fc 100644 (file)
@@ -169,7 +169,7 @@ func (s *RouterIntegrationSuite) SetUpTest(c *check.C) {
        cluster.TLS.Insecure = true
        arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
        url, _ := url.Parse("https://" + os.Getenv("ARVADOS_TEST_API_HOST"))
-       s.rtr = New(rpc.NewConn("zzzzz", url, true, rpc.PassthroughTokenProvider), nil)
+       s.rtr = New(rpc.NewConn("zzzzz", url, true, rpc.PassthroughTokenProvider), Config{})
 }
 
 func (s *RouterIntegrationSuite) TearDownSuite(c *check.C) {
@@ -226,6 +226,34 @@ func (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {
        c.Check(jresp["kind"], check.Equals, "arvados#collection")
 }
 
+func (s *RouterIntegrationSuite) TestMaxRequestSize(c *check.C) {
+       token := arvadostest.ActiveTokenV2
+       for _, maxRequestSize := range []int{
+               // Ensure 5M limit is enforced.
+               5000000,
+               // Ensure 50M limit is enforced, and that a >25M body
+               // is accepted even though the default Go request size
+               // limit is 10M.
+               50000000,
+       } {
+               s.rtr.config.MaxRequestSize = maxRequestSize
+               okstr := "a"
+               for len(okstr) < maxRequestSize/2 {
+                       okstr = okstr + okstr
+               }
+
+               hdr := http.Header{"Content-Type": {"application/x-www-form-urlencoded"}}
+
+               body := bytes.NewBufferString(url.Values{"foo_bar": {okstr}}.Encode())
+               _, rr, _ := doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, hdr, body)
+               c.Check(rr.Code, check.Equals, http.StatusOK)
+
+               body = bytes.NewBufferString(url.Values{"foo_bar": {okstr + okstr}}.Encode())
+               _, rr, _ = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, hdr, body)
+               c.Check(rr.Code, check.Equals, http.StatusRequestEntityTooLarge)
+       }
+}
+
 func (s *RouterIntegrationSuite) TestContainerList(c *check.C) {
        token := arvadostest.ActiveTokenV2
 
index 838de35563e60fb1a994e8cee8c28dd2b9321fbe..e3558c3f41bec4b47c01b2575e4793fbbebb7674 100644 (file)
@@ -35,9 +35,8 @@ func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
        log := ctxlog.TestLogger(c)
 
        handler := &Handler{Cluster: &arvados.Cluster{
-               ClusterID:        "zzzzz",
-               PostgreSQL:       integrationTestCluster().PostgreSQL,
-               ForceLegacyAPI14: forceLegacyAPI14,
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
        }}
        handler.Cluster.TLS.Insecure = true
        arvadostest.SetServiceURL(&handler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
index fe498d0484b0d41a0ceb0428aafc68a879033cc6..5fcc0903f5d1b43d3b58e83d8e6721832985c4ed 100644 (file)
@@ -55,6 +55,15 @@ type rateLimitedInstanceSet struct {
        ticker *time.Ticker
 }
 
+func (is rateLimitedInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {
+       <-is.ticker.C
+       insts, err := is.InstanceSet.Instances(tags)
+       for i, inst := range insts {
+               insts[i] = &rateLimitedInstance{inst, is.ticker}
+       }
+       return insts, err
+}
+
 func (is rateLimitedInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {
        <-is.ticker.C
        inst, err := is.InstanceSet.Create(it, image, tags, init, pk)
@@ -71,6 +80,11 @@ func (inst *rateLimitedInstance) Destroy() error {
        return inst.Instance.Destroy()
 }
 
+func (inst *rateLimitedInstance) SetTags(tags cloud.InstanceTags) error {
+       <-inst.ticker.C
+       return inst.Instance.SetTags(tags)
+}
+
 // Adds the specified defaultTags to every Create() call.
 type defaultTaggingInstanceSet struct {
        cloud.InstanceSet
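
The wrappers above all share one time.Ticker, so every cloud API call across Instances, Create, Destroy, and SetTags blocks for the next tick before proceeding. A minimal standalone sketch of that throttling pattern (with deliberately simplified types, not the driver's real interface):

```go
package main

import (
	"fmt"
	"time"
)

type rateLimited struct {
	ticker *time.Ticker
}

func (rl rateLimited) call(name string) {
	<-rl.ticker.C // wait for the next tick before each API call
	fmt.Println(name, time.Now().Format("15:04:05.000"))
}

func main() {
	rl := rateLimited{ticker: time.NewTicker(200 * time.Millisecond)}
	defer rl.ticker.Stop()
	for _, op := range []string{"Instances", "Create", "SetTags", "Destroy"} {
		rl.call(op) // calls are serialized ~200ms apart
	}
}
```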
index 030665d77f7a075f289c92d4a715f1d414de9a9b..cec20279d1fdc81cb56936ed731dea6c6a4c0e8c 100644 (file)
@@ -53,6 +53,8 @@ func (c Collection) resourceName() string {
 
 // SizedDigests returns the hash+size part of each data block
 // referenced by the collection.
+//
+// Zero-length blocks are not included.
 func (c *Collection) SizedDigests() ([]SizedDigest, error) {
        manifestText := c.ManifestText
        if manifestText == "" {
@@ -76,6 +78,10 @@ func (c *Collection) SizedDigests() ([]SizedDigest, error) {
                                // FIXME: ensure it's a file token
                                break
                        }
+                       if strings.HasPrefix(token, "d41d8cd98f00b204e9800998ecf8427e+0") {
+                               // Exclude "empty block" placeholder
+                               continue
+                       }
                        // FIXME: shouldn't assume 32 char hash
                        if i := strings.IndexRune(token[33:], '+'); i >= 0 {
                                token = token[:33+i]
diff --git a/sdk/go/arvados/collection_test.go b/sdk/go/arvados/collection_test.go
new file mode 100644 (file)
index 0000000..b217b69
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CollectionSuite{})
+
+type CollectionSuite struct{}
+
+func (s *CollectionSuite) TestSizedDigests(c *check.C) {
+       coll := Collection{ManifestText: ". d41d8cd98f00b204e9800998ecf8427e+0 acbd18db4cc2f85cedef654fccc4a4d8+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:0:foo 0:3:bar 3:3:baz\n"}
+       sd, err := coll.SizedDigests()
+       c.Check(err, check.IsNil)
+       c.Check(sd, check.DeepEquals, []SizedDigest{"acbd18db4cc2f85cedef654fccc4a4d8+3", "73feffa4b7f6bb68e44cf984c85f6e88+3"})
+
+       coll = Collection{ManifestText: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar\n. 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:3:baz\n"}
+       sd, err = coll.SizedDigests()
+       c.Check(err, check.IsNil)
+       c.Check(sd, check.DeepEquals, []SizedDigest{"acbd18db4cc2f85cedef654fccc4a4d8+3", "73feffa4b7f6bb68e44cf984c85f6e88+3"})
+
+       coll = Collection{ManifestText: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"}
+       sd, err = coll.SizedDigests()
+       c.Check(err, check.IsNil)
+       c.Check(sd, check.HasLen, 0)
+
+       coll = Collection{ManifestText: "", PortableDataHash: "d41d8cd98f00b204e9800998ecf8427e+0"}
+       sd, err = coll.SizedDigests()
+       c.Check(err, check.IsNil)
+       c.Check(sd, check.HasLen, 0)
+}
index 2fda7febe5348163c43fb3dc387aca6df971f362..c0170d1d7f53e61460e2e32155a250fe9ed62cc6 100644 (file)
@@ -266,8 +266,6 @@ type Cluster struct {
                SSHHelpHostSuffix      string
                IdleTimeout            Duration
        }
-
-       ForceLegacyAPI14 bool
 }
 
 type Volume struct {
index d90c618f7a1effa8c3da8031cb98909a1b37df3f..4c594625e700d086d7d1a68764b640b247219fda 100644 (file)
@@ -206,6 +206,9 @@ func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string,
        if scheme == "" {
                scheme = "https"
        }
+       if c.ApiServer == "" {
+               return nil, fmt.Errorf("Arvados client is not configured (target API host is not set). Maybe env var ARVADOS_API_HOST should be set first?")
+       }
        u := url.URL{
                Scheme: scheme,
                Host:   c.ApiServer}
index fc686ad63739e51340d5e254f8f68d65ac4db3e7..9d6e4fe7e8f7e702287db4865c348715c104a4ff 100644 (file)
@@ -10,7 +10,9 @@ import (
        "net/http"
        "os"
        "testing"
+       "time"
 
+       "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        . "gopkg.in/check.v1"
 )
@@ -28,14 +30,12 @@ var _ = Suite(&MockArvadosServerSuite{})
 type ServerRequiredSuite struct{}
 
 func (s *ServerRequiredSuite) SetUpSuite(c *C) {
-       arvadostest.StartAPI()
        arvadostest.StartKeep(2, false)
        RetryDelay = 0
 }
 
 func (s *ServerRequiredSuite) TearDownSuite(c *C) {
        arvadostest.StopKeep(2)
-       arvadostest.StopAPI()
 }
 
 func (s *ServerRequiredSuite) SetUpTest(c *C) {
@@ -158,6 +158,32 @@ func (s *ServerRequiredSuite) TestAPIDiscovery_Get_noSuchParameter(c *C) {
        c.Assert(value, IsNil)
 }
 
+func (s *ServerRequiredSuite) TestCreateLarge(c *C) {
+       arv, err := MakeArvadosClient()
+       c.Assert(err, IsNil)
+
+       txt := arvados.SignLocator("d41d8cd98f00b204e9800998ecf8427e+0", arv.ApiToken, time.Now().Add(time.Minute), time.Minute, []byte(arvadostest.SystemRootToken))
+       // Ensure our request body is bigger than the Go http server's
+       // default max size, 10 MB.
+       for len(txt) < 12000000 {
+               txt = txt + " " + txt
+       }
+       txt = ". " + txt + " 0:0:foo\n"
+
+       resp := Dict{}
+       err = arv.Create("collections", Dict{
+               "ensure_unique_name": true,
+               "collection": Dict{
+                       "is_trashed":    true,
+                       "name":          "test",
+                       "manifest_text": txt,
+               },
+       }, &resp)
+       c.Check(err, IsNil)
+       c.Check(resp["portable_data_hash"], Not(Equals), "")
+       c.Check(resp["portable_data_hash"], Not(Equals), "d41d8cd98f00b204e9800998ecf8427e+0")
+}
+
 type UnitSuite struct{}
 
 func (s *UnitSuite) TestUUIDMatch(c *C) {
index 726c3fb30c88414cd7e3ea93841084bdfadf61f0..5eafbbe339a9d7d4baa29081a049aa5e924ac61f 100644 (file)
@@ -138,6 +138,10 @@ func (kc *KeepClient) discoverServices() error {
                return nil
        }
 
+       if kc.Arvados.ApiServer == "" {
+               return fmt.Errorf("Arvados client is not configured (target API host is not set). Maybe env var ARVADOS_API_HOST should be set first?")
+       }
+
        svcListCacheMtx.Lock()
        cacheEnt, ok := svcListCache[kc.Arvados.ApiServer]
        if !ok {
index 315fc74a713f42fbee7b7b030c36576ed5426bc0..4fe3999f2c3098391f387f51fe96cc628bf1a790 100644 (file)
@@ -157,7 +157,7 @@ def http_cache(data_type):
     return cache.SafeHTTPCache(path, max_age=60*60*24*2)
 
 def api(version=None, cache=True, host=None, token=None, insecure=False,
-        request_id=None, timeout=5*60, **kwargs):
+        request_id=None, timeout=10, **kwargs):
     """Return an apiclient Resources object for an Arvados instance.
 
     :version:
index a4336049f2447bd18cf396cbec0b76e7cdf69356..35b780071a356f5bb7ab38e053798908b0dafe6b 100644 (file)
@@ -24,6 +24,7 @@ http {
     server_name controller ~.*;
     ssl_certificate "{{SSLCERT}}";
     ssl_certificate_key "{{SSLKEY}}";
+    client_max_body_size 0;
     location  / {
       proxy_pass http://controller;
       proxy_set_header Host $http_host;
index 8d3142ab6aa49980babae66e255d2f183224109e..60183e06a352259530534bedf56da1bbba5c3443 100644 (file)
@@ -101,6 +101,12 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
             text = "X" * maxsize
             arvados.api('v1').collections().create(body={"manifest_text": text}).execute()
 
+    # Checks for bug #17171
+    def test_default_request_timeout(self):
+        api = arvados.api('v1')
+        self.assertEqual(api._http.timeout, 10,
+            "Default timeout value should be 10")
+
     def test_ordered_json_model(self):
         mock_responses = {
             'arvados.humans.get': (
diff --git a/services/api/bin/yarn b/services/api/bin/yarn
deleted file mode 100755 (executable)
index cc54a3b..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env ruby
-
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-APP_ROOT = File.expand_path('..', __dir__)
-Dir.chdir(APP_ROOT) do
-  begin
-    exec "yarnpkg", *ARGV
-  rescue Errno::ENOENT
-    $stderr.puts "Yarn executable was not detected in the system."
-    $stderr.puts "Download Yarn at https://yarnpkg.com/en/docs/install"
-    exit 1
-  end
-end
index 72c11649d86e57870f54b46949142bdd4e91e557..2c259919aeb69e6852076485c30e71ca0dd6f293 100644 (file)
@@ -56,19 +56,25 @@ clusterID, clusterConfig = confs["Clusters"].first
 $arvados_config_defaults = clusterConfig
 $arvados_config_defaults["ClusterID"] = clusterID
 
-# Load the global config file
-Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
-  confs = YAML.load(stdout, deserialize_symbols: false)
-  if confs && !confs.empty?
-    # config-dump merges defaults with user configuration, so every
-    # key should be set.
-    clusterID, clusterConfig = confs["Clusters"].first
-    $arvados_config_global = clusterConfig
-    $arvados_config_global["ClusterID"] = clusterID
-  else
-    # config-dump failed, assume we will be loading from legacy
-    # application.yml, initialize with defaults.
-    $arvados_config_global = $arvados_config_defaults.deep_dup
+if ENV["ARVADOS_CONFIG"] == "none"
+  # Don't load config. This magic value is set by packaging scripts so
+  # they can run "rake assets:precompile" without a real config.
+  $arvados_config_global = $arvados_config_defaults.deep_dup
+else
+  # Load the global config file
+  Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
+    confs = YAML.load(stdout, deserialize_symbols: false)
+    if confs && !confs.empty?
+      # config-dump merges defaults with user configuration, so every
+      # key should be set.
+      clusterID, clusterConfig = confs["Clusters"].first
+      $arvados_config_global = clusterConfig
+      $arvados_config_global["ClusterID"] = clusterID
+    else
+      # config-dump failed, assume we will be loading from legacy
+      # application.yml, initialize with defaults.
+      $arvados_config_global = $arvados_config_defaults.deep_dup
+    end
   end
 end
 
@@ -125,7 +131,7 @@ arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duratio
 arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
 arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
 arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
-arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
+arvcfg.declare_config "Collections.BlobSigningKey", String, :blob_signing_key
 arvcfg.declare_config "Collections.BlobSigningTTL", ActiveSupport::Duration, :blob_signature_ttl
 arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v }
 arvcfg.declare_config "Collections.ForwardSlashNameSubstitution", String
@@ -266,6 +272,15 @@ if ::Rails.env.to_s == "test"
   $arvados_config["PostgreSQL"]["Connection"]["collation"] = "en_US.UTF-8"
 end
 
+if ENV["ARVADOS_CONFIG"] == "none"
+  # We need the postgresql connection URI to be valid, even if we
+  # don't use it.
+  $arvados_config["PostgreSQL"]["Connection"]["host"] = "localhost"
+  $arvados_config["PostgreSQL"]["Connection"]["user"] = "x"
+  $arvados_config["PostgreSQL"]["Connection"]["password"] = "x"
+  $arvados_config["PostgreSQL"]["Connection"]["dbname"] = "x"
+end
+
 if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
   raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
 end
index 2abe40566ecf03cc0d48054b74690c6d1d7048b6..b6a2895f78ccb2a2f0c09897a4a786bf326babbf 100644 (file)
@@ -10,7 +10,7 @@ require 'enable_jobs_api'
 
 Rails.application.configure do
   begin
-    if ActiveRecord::Base.connection.tables.include?('jobs')
+    if ENV["ARVADOS_CONFIG"] != "none" && ActiveRecord::Base.connection.tables.include?('jobs')
       check_enable_legacy_jobs_api
     end
   rescue ActiveRecord::NoDatabaseError
index 4ea2fa2f6dea89af1b3a744b09a2da6d36e61169..94b59ebd41ea843d80cf32fade7e5dd6168a225e 100644 (file)
@@ -485,13 +485,18 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        }
 
        openPath := "/" + strings.Join(targetPath, "/")
-       if f, err := fs.Open(openPath); os.IsNotExist(err) {
+       f, err := fs.Open(openPath)
+       if os.IsNotExist(err) {
                // Requested non-existent path
                http.Error(w, notFoundMessage, http.StatusNotFound)
+               return
        } else if err != nil {
                // Some other (unexpected) error
                http.Error(w, "open: "+err.Error(), http.StatusInternalServerError)
-       } else if stat, err := f.Stat(); err != nil {
+               return
+       }
+       defer f.Close()
+       if stat, err := f.Stat(); err != nil {
                // Can't get Size/IsDir (shouldn't happen with a collectionFS!)
                http.Error(w, "stat: "+err.Error(), http.StatusInternalServerError)
        } else if stat.IsDir() && !strings.HasSuffix(r.URL.Path, "/") {
@@ -504,15 +509,14 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
                h.serveDirectory(w, r, collection.Name, fs, openPath, true)
        } else {
                http.ServeContent(w, r, basename, stat.ModTime(), f)
-               if wrote := int64(w.WroteBodyBytes()); wrote != stat.Size() && r.Header.Get("Range") == "" {
+               if wrote := int64(w.WroteBodyBytes()); wrote != stat.Size() && w.WroteStatus() == http.StatusOK {
                        // If we wrote fewer bytes than expected, it's
                        // too late to change the real response code
                        // or send an error message to the client, but
                        // at least we can try to put some useful
                        // debugging info in the logs.
                        n, err := f.Read(make([]byte, 1024))
-                       ctxlog.FromContext(r.Context()).Errorf("stat.Size()==%d but only wrote %d bytes; read(1024) returns %d, %s", stat.Size(), wrote, n, err)
-
+                       ctxlog.FromContext(r.Context()).Errorf("stat.Size()==%d but only wrote %d bytes; read(1024) returns %d, %v", stat.Size(), wrote, n, err)
                }
        }
 }
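
The revised condition works because http.ServeContent answers a matching If-Modified-Since with 304 and an intentionally empty body, so a short write only indicates a Keep read problem when the status was 200. A minimal standalone sketch (not part of this commit) of that 304 behavior:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"time"
)

func main() {
	modtime := time.Now().UTC().Truncate(time.Second)
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.ServeContent(w, r, "foo", modtime, strings.NewReader("foo"))
	})
	req := httptest.NewRequest("GET", "/foo", nil)
	// Client already has a fresh copy: ServeContent writes no body.
	req.Header.Set("If-Modified-Since", modtime.Format(http.TimeFormat))
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	fmt.Println(rr.Code, rr.Body.Len()) // 304 0
}
```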
index 5291efeb822a4a2fe22af022cf15208d0ee1ba7f..9252bd82d7e15cb86424106c1c76a5009061189f 100644 (file)
@@ -6,6 +6,7 @@ package main
 
 import (
        "bytes"
+       "context"
        "fmt"
        "html"
        "io/ioutil"
@@ -16,6 +17,7 @@ import (
        "path/filepath"
        "regexp"
        "strings"
+       "time"
 
        "git.arvados.org/arvados.git/lib/config"
        "git.arvados.org/arvados.git/sdk/go/arvados"
@@ -24,6 +26,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/auth"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
+       "github.com/sirupsen/logrus"
        check "gopkg.in/check.v1"
 )
 
@@ -72,6 +75,64 @@ func (s *UnitSuite) TestCORSPreflight(c *check.C) {
        c.Check(resp.Code, check.Equals, http.StatusMethodNotAllowed)
 }
 
+func (s *UnitSuite) TestEmptyResponse(c *check.C) {
+       for _, trial := range []struct {
+               dataExists    bool
+               sendIMSHeader bool
+               expectStatus  int
+               logRegexp     string
+       }{
+               // If we return no content due to a Keep read error,
+               // we should emit a log message.
+               {false, false, http.StatusOK, `(?ms).*only wrote 0 bytes.*`},
+
+               // If we return no content because the client sent an
+               // If-Modified-Since header, our response should be
+               // 304, and we should not emit a log message.
+               {true, true, http.StatusNotModified, ``},
+       } {
+               c.Logf("trial: %+v", trial)
+               arvadostest.StartKeep(2, true)
+               if trial.dataExists {
+                       arv, err := arvadosclient.MakeArvadosClient()
+                       c.Assert(err, check.IsNil)
+                       arv.ApiToken = arvadostest.ActiveToken
+                       kc, err := keepclient.MakeKeepClient(arv)
+                       c.Assert(err, check.IsNil)
+                       _, _, err = kc.PutB([]byte("foo"))
+                       c.Assert(err, check.IsNil)
+               }
+
+               h := handler{Config: newConfig(s.Config)}
+               u := mustParseURL("http://" + arvadostest.FooCollection + ".keep-web.example/foo")
+               req := &http.Request{
+                       Method:     "GET",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header: http.Header{
+                               "Authorization": {"Bearer " + arvadostest.ActiveToken},
+                       },
+               }
+               if trial.sendIMSHeader {
+                       req.Header.Set("If-Modified-Since", strings.Replace(time.Now().UTC().Format(time.RFC1123), "UTC", "GMT", -1))
+               }
+
+               var logbuf bytes.Buffer
+               logger := logrus.New()
+               logger.Out = &logbuf
+               req = req.WithContext(ctxlog.Context(context.Background(), logger))
+
+               resp := httptest.NewRecorder()
+               h.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, trial.expectStatus)
+               c.Check(resp.Body.String(), check.Equals, "")
+
+               c.Log(logbuf.String())
+               c.Check(logbuf.String(), check.Matches, trial.logRegexp)
+       }
+}
+
 func (s *UnitSuite) TestInvalidUUID(c *check.C) {
        bogusID := strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + "-"
        token := arvadostest.ActiveToken
@@ -237,7 +298,6 @@ func (s *IntegrationSuite) doVhostRequestsWithHostPath(c *check.C, authz authori
                if tok == arvadostest.ActiveToken {
                        c.Check(code, check.Equals, http.StatusOK)
                        c.Check(body, check.Equals, "foo")
-
                } else {
                        c.Check(code >= 400, check.Equals, true)
                        c.Check(code < 500, check.Equals, true)
diff --git a/tools/salt-install/.gitignore b/tools/salt-install/.gitignore
new file mode 100644 (file)
index 0000000..55096af
--- /dev/null
@@ -0,0 +1,3 @@
+local_config_dir
+local.params
+*pem
index 10d08b414adfdc726e586be2b03a8b1c8b2afdd4..b1ebb973b9629bb4133f41a1dc01e10c7d0e3bfc 100644 (file)
@@ -6,15 +6,21 @@
 
 ##### About
 
-This directory holds a small script to install Arvados on a single node, using the
-[Saltstack arvados-formula](https://github.com/saltstack-formulas/arvados-formula)
+This directory holds a small script to help you get Arvados up and running, using the
+[Saltstack arvados-formula](https://github.com/arvados/arvados-formula.git)
 in master-less mode.
 
-The fastest way to get it running is to modify the first lines in the `provision.sh`
-script to suit your needs, copy it in the host where you want to install Arvados
-and run it as root.
+There are a few preset examples that you can use:
 
-There's an example `Vagrantfile` also, to install it in a vagrant box if you want
+* `single_host`: Installs all the Arvados components on a single host. Suitable for testing
+  or demoing, but not recommended for production use.
+* `multi_host/aws`: Lets you install different Arvados components on different hosts on AWS.
+  
+The fastest way to get it running is to copy the `local.params.example` file to `local.params`,
+edit it to suit your needs, then copy it along with the `provision.sh` script
+onto the host where you want to install Arvados and run `provision.sh` as root.
+
+There's also an example `Vagrantfile`, to install Arvados in a Vagrant box if you want
 to try it locally.
 
 For more information, please read https://doc.arvados.org/main/install/salt-single-host.html
index 6966ea83452f74558a4749f44d01d3b076a629d6..3019a9fb1cb50ac5595c0b76228489f2b1e9e4d4 100644 (file)
@@ -11,32 +11,83 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.ssh.insert_key = false
   config.ssh.forward_x11 = true
 
-  config.vm.define "arvados" do |arv|
-    arv.vm.box = "bento/debian-10"
-    arv.vm.hostname = "vagrant.local"
-    # CPU/RAM
-    config.vm.provider :virtualbox do |v|
-      v.memory = 2048
-      v.cpus = 2
-    end
+   # A single_host multiple_hostnames example
+   config.vm.define "arvados-sh-mn" do |arv|
+     arv.vm.box = "bento/debian-10"
+     arv.vm.hostname = "harpo"
+     # CPU/RAM
+     config.vm.provider :virtualbox do |v|
+       v.memory = 2048
+       v.cpus = 2
+     end
 
-    # Networking
-    arv.vm.network "forwarded_port", guest: 8443, host: 8443
-    arv.vm.network "forwarded_port", guest: 25100, host: 25100
-    arv.vm.network "forwarded_port", guest: 9002, host: 9002
-    arv.vm.network "forwarded_port", guest: 9000, host: 9000
-    arv.vm.network "forwarded_port", guest: 8900, host: 8900
-    arv.vm.network "forwarded_port", guest: 8002, host: 8002
-    arv.vm.network "forwarded_port", guest: 8001, host: 8001
-    arv.vm.network "forwarded_port", guest: 8000, host: 8000
-    arv.vm.network "forwarded_port", guest: 3001, host: 3001
-    arv.vm.provision "shell",
-                     path: "provision.sh",
-                     args: [
-                       # "--debug",
-                       "--test",
-                       "--vagrant",
-                       "--ssl-port=8443"
-                     ].join(" ")
-  end
+     # Networking
+     # WEBUI PORT
+     arv.vm.network "forwarded_port", guest: 8443, host: 8443
+     # KEEPPROXY
+     arv.vm.network "forwarded_port", guest: 25101, host: 25101
+     # KEEPWEB
+     arv.vm.network "forwarded_port", guest: 9002, host: 9002
+     # WEBSOCKET
+     arv.vm.network "forwarded_port", guest: 8002, host: 8002
+     arv.vm.provision "shell",
+                      inline: "cp -vr /vagrant/config_examples/single_host/multiple_hostnames /home/vagrant/local_config_dir;
+                               cp -vr /vagrant/tests /home/vagrant/tests;
+                               sed 's#cluster_fixme_or_this_wont_work#harpo#g;
+                                    s#domain_fixme_or_this_wont_work#local#g;
+                                    s/#\ BRANCH=\"master\"/\ BRANCH=\"master\"/g;
+                                    s#CONTROLLER_EXT_SSL_PORT=443#CONTROLLER_EXT_SSL_PORT=8443#g' \
+                                    /vagrant/local.params.example.single_host_multiple_hostnames > /tmp/local.params.single_host_multiple_hostnames"
+     arv.vm.provision "shell",
+                      path: "provision.sh",
+                      args: [
+                        # "--debug",
+                        "--config /tmp/local.params.single_host_multiple_hostnames",
+                        "--test",
+                        "--vagrant"
+                      ].join(" ")
+   end
+
+   # A single_host single_hostname example
+   config.vm.define "arvados-sh-sn" do |arv|
+     arv.vm.box = "bento/debian-10"
+     arv.vm.hostname = "zeppo"
+     # CPU/RAM
+     config.vm.provider :virtualbox do |v|
+       v.memory = 2048
+       v.cpus = 2
+     end
+     # Networking
+     # WEBUI PORT
+     arv.vm.network "forwarded_port", guest: 9443, host: 9443
+     # WORKBENCH1
+     arv.vm.network "forwarded_port", guest: 9444, host: 9444
+     # WORKBENCH2
+     arv.vm.network "forwarded_port", guest: 9445, host: 9445
+     # KEEPPROXY
+     arv.vm.network "forwarded_port", guest: 35101, host: 35101
+     # KEEPWEB
+     arv.vm.network "forwarded_port", guest: 11002, host: 11002
+     # WEBSHELL
+     arv.vm.network "forwarded_port", guest: 14202, host: 14202
+     # WEBSOCKET
+     arv.vm.network "forwarded_port", guest: 18002, host: 18002
+     arv.vm.provision "shell",
+                      inline: "cp -vr /vagrant/config_examples/single_host/single_hostname /home/vagrant/local_config_dir;
+                               cp -vr /vagrant/tests /home/vagrant/tests;
+                               sed 's#HOSTNAME_EXT=\"\"#HOSTNAME_EXT=\"zeppo.local\"#g;
+                                    s#cluster_fixme_or_this_wont_work#zeppo#g;
+                                    s/#\ BRANCH=\"master\"/\ BRANCH=\"master\"/g;
+                                    s#domain_fixme_or_this_wont_work#local#g;' \
+                                    /vagrant/local.params.example.single_host_single_hostname > /tmp/local.params.single_host_single_hostname"
+     arv.vm.provision "shell",
+                      path: "provision.sh",
+                      args: [
+                        # "--debug",
+                        "--config /tmp/local.params.single_host_single_hostname",
+                        "--test",
+                        "--vagrant"
+                      ].join(" ")
+   end
 end
diff --git a/tools/salt-install/config_examples/multi_host/aws/README.md b/tools/salt-install/config_examples/multi_host/aws/README.md
new file mode 100644 (file)
index 0000000..58911d9
--- /dev/null
@@ -0,0 +1,9 @@
+Arvados installation using multiple instances
+=============================================
+
+These files let you set up Arvados across multiple instances on AWS. This setup
+assumes the instances are deployed in an isolated VPC, created/managed with
+[the Arvados terraform code](https://github.com/arvados/arvados/tree/terraform/tools/terraform)
+in our repo.
+
+Please check [the Arvados installation documentation](https://doc.arvados.org/install/salt-multi-host.html) for more details.
diff --git a/tools/salt-install/config_examples/multi_host/aws/certs/README.md b/tools/salt-install/config_examples/multi_host/aws/certs/README.md
new file mode 100644 (file)
index 0000000..00d486e
--- /dev/null
@@ -0,0 +1,19 @@
+SSL Certificates
+================
+
+Add the certificates for your hosts in this directory.
+
+The nodes requiring certificates are:
+
+* CLUSTER.DOMAIN
+* collections.CLUSTER.DOMAIN
+* \*\-\-collections.CLUSTER.DOMAIN
+* download.CLUSTER.DOMAIN
+* keep.CLUSTER.DOMAIN
+* workbench.CLUSTER.DOMAIN
+* workbench2.CLUSTER.DOMAIN
+* ws.CLUSTER.DOMAIN
+
+They can be individual certificates or a wildcard certificate for all of them.
+
+Please remember to modify the *nginx\_\** salt pillars accordingly.
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
new file mode 100644 (file)
index 0000000..4ecc65e
--- /dev/null
@@ -0,0 +1,264 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# The variables commented out are the default values that the formula uses.
+# The uncommented values are REQUIRED values. If you don't set them, running
+# this formula will fail.
+arvados:
+  ### GENERAL CONFIG
+  version: '__VERSION__'
+  ## It makes little sense to disable this flag, but you can, if you want :)
+  # use_upstream_repo: true
+
+  ## Repo URL is built with grains values. If desired, it can be completely
+  ## overwritten with the pillar parameter 'repo_url'
+  # repo:
+  #   humanname: Arvados Official Repository
+
+  release: __RELEASE__
+
+  ## IMPORTANT!!!!!
+  ## api, workbench and shell require some gems, so you need to make sure ruby
+  ## and deps are installed in order to install and compile the gems.
+  ## We default to `false` in these two variables as it's expected you already
+  ## manage OS packages with some other tool and you don't want us messing
+  ## with your setup.
+  ruby:
+    ## We set these to `true` here for testing purposes.
+    ## They both default to `false`.
+    manage_ruby: true
+    manage_gems_deps: true
+    # pkg: ruby
+    # gems_deps:
+    #     - curl
+    #     - g++
+    #     - gcc
+    #     - git
+    #     - libcurl4
+    #     - libcurl4-gnutls-dev
+    #     - libpq-dev
+    #     - libxml2
+    #     - libxml2-dev
+    #     - make
+    #     - python3-dev
+    #     - ruby-dev
+    #     - zlib1g-dev
+
+  # config:
+  #   file: /etc/arvados/config.yml
+  #   user: root
+  ## IMPORTANT!!!!!
+  ## If you're installing any of the Rails apps (api, workbench), the group
+  ## should be set to that of the web server, usually `www-data`
+  #   group: root
+  #   mode: 640
+  dispatcher:
+    pkg:
+      name: arvados-dispatch-cloud
+    service:
+      name: arvados-dispatch-cloud
+
+  ### ARVADOS CLUSTER CONFIG
+  cluster:
+    name: __CLUSTER__
+    domain: __DOMAIN__
+
+    database:
+      # max concurrent connections per arvados server daemon
+      # connection_pool_max: 32
+      name: __CLUSTER___arvados
+      host: __DATABASE_INT_IP__
+      password: "__DATABASE_PASSWORD__"
+      user: __CLUSTER___arvados
+      encoding: en_US.utf8
+      client_encoding: UTF8
+
+    tls:
+      # certificate: ''
+      # key: ''
+      # required to test with arvados-snakeoil certs
+      insecure: false
+
+    ### TOKENS
+    tokens:
+      system_root: __SYSTEM_ROOT_TOKEN__
+      management: __MANAGEMENT_TOKEN__
+      anonymous_user: __ANONYMOUS_USER_TOKEN__
+
+    ### KEYS
+    secrets:
+      blob_signing_key: __BLOB_SIGNING_KEY__
+      workbench_secret_key: __WORKBENCH_SECRET_KEY__
+
+    Login:
+      Test:
+        Enable: true
+        Users:
+          __INITIAL_USER__:
+            Email: __INITIAL_USER_EMAIL__
+            Password: __INITIAL_USER_PASSWORD__
+
+    ### CONTAINERS
+    Containers:
+      MaxRetryAttempts: 10
+      CloudVMs:
+        ResourceTags:
+          Name: __CLUSTER__-compute-node
+        BootProbeCommand: 'sudo docker ps -q'
+        ImageID: ami-FIXMEFIXMEFIXMEFI
+        Driver: ec2
+        DriverParameters:
+          Region: FIXME
+          EBSVolumeType: gp2
+          AdminUsername: FIXME
+          ### This SG should allow SSH from the dispatcher to the compute nodes
+          SecurityGroupIDs: ['sg-FIXMEFIXMEFIXMEFI']
+          SubnetID: subnet-FIXMEFIXMEFIXMEFI
+      DispatchPrivateKey: |
+        -----BEGIN OPENSSH PRIVATE KEY-----
+        Read https://doc.arvados.org/v2.0/install/install-dispatch-cloud.html
+        for details on how to create it and where to place the key
+        FIXMEFIXMEFIXMEFI
+        -----END OPENSSH PRIVATE KEY-----
+
+    ### VOLUMES
+    ## This should usually match all your `keepstore` instances
+    Volumes:
+      # the volume name will be composed with
+      # <cluster>-nyw5e-<volume>
+      __CLUSTER__-nyw5e-0000000000000000:
+        AccessViaHosts:
+          'http://__KEEPSTORE0_INT_IP__:25107':
+            ReadOnly: false
+        Replication: 2
+        Driver: S3
+        DriverParameters:
+          Bucket: __CLUSTER__-nyw5e-0000000000000000-volume
+          IAMRole: __CLUSTER__-keepstore-00-iam-role
+          Region: FIXME
+      __CLUSTER__-nyw5e-0000000000000001:
+        AccessViaHosts:
+          'http://__KEEPSTORE1_INT_IP__:25107':
+            ReadOnly: false
+        Replication: 2
+        Driver: S3
+        DriverParameters:
+          Bucket: __CLUSTER__-nyw5e-0000000000000001-volume
+          IAMRole: __CLUSTER__-keepstore-01-iam-role
+          Region: FIXME
+
+    Users:
+      NewUsersAreActive: true
+      AutoAdminFirstUser: true
+      AutoSetupNewUsers: true
+      AutoSetupNewUsersWithRepository: true
+
+    Services:
+      Controller:
+        ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
+        InternalURLs:
+          'http://localhost:8003': {}
+      DispatchCloud:
+        InternalURLs:
+          'http://__CONTROLLER_INT_IP__:9006': {}
+      Keepproxy:
+        ExternalURL: 'https://keep.__CLUSTER__.__DOMAIN__:__KEEP_EXT_SSL_PORT__'
+        InternalURLs:
+          'http://localhost:25107': {}
+      Keepstore:
+        InternalURLs:
+          'http://__KEEPSTORE0_INT_IP__:25107': {}
+          'http://__KEEPSTORE1_INT_IP__:25107': {}
+      RailsAPI:
+        InternalURLs:
+          'http://localhost:8004': {}
+      WebDAV:
+        ExternalURL: 'https://*--collections.__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__/'
+        InternalURLs:
+          'http://localhost:9002': {}
+      WebDAVDownload:
+        ExternalURL: 'https://download.__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
+      WebShell:
+        ExternalURL: 'https://webshell.__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
+      Websocket:
+        ExternalURL: 'wss://ws.__CLUSTER__.__DOMAIN__/websocket'
+        InternalURLs:
+          'http://localhost:8005': {}
+      Workbench1:
+        ExternalURL: 'https://workbench.__CLUSTER__.__DOMAIN__:__WORKBENCH1_EXT_SSL_PORT__'
+      Workbench2:
+        ExternalURL: 'https://workbench2.__CLUSTER__.__DOMAIN__:__WORKBENCH2_EXT_SSL_PORT__'
+
+    InstanceTypes:
+      t3small:
+        ProviderType: t3.small
+        VCPUs: 2
+        RAM: 2GiB
+        IncludedScratch: 50GB
+        AddedScratch: 50GB
+        Price: 0.0208
+      c5large:
+        ProviderType: c5.large
+        VCPUs: 2
+        RAM: 4GiB
+        IncludedScratch: 50GB
+        AddedScratch: 50GB
+        Price: 0.085
+      m5large:
+        ProviderType: m5.large
+        VCPUs: 2
+        RAM: 8GiB
+        IncludedScratch: 50GB
+        AddedScratch: 50GB
+        Price: 0.096
+      c5xlarge:
+        ProviderType: c5.xlarge
+        VCPUs: 4
+        RAM: 8GiB
+        IncludedScratch: 100GB
+        AddedScratch: 100GB
+        Price: 0.17
+      m5xlarge:
+        ProviderType: m5.xlarge
+        VCPUs: 4
+        RAM: 16GiB
+        IncludedScratch: 100GB
+        AddedScratch: 100GB
+        Price: 0.192
+      m5xlarge_extradisk:
+        ProviderType: m5.xlarge
+        VCPUs: 4
+        RAM: 16GiB
+        IncludedScratch: 400GB
+        AddedScratch: 400GB
+        Price: 0.193
+      c52xlarge:
+        ProviderType: c5.2xlarge
+        VCPUs: 8
+        RAM: 16GiB
+        IncludedScratch: 200GB
+        AddedScratch: 200GB
+        Price: 0.34
+      m52xlarge:
+        ProviderType: m5.2xlarge
+        VCPUs: 8
+        RAM: 32GiB
+        IncludedScratch: 200GB
+        AddedScratch: 200GB
+        Price: 0.384
+      c54xlarge:
+        ProviderType: c5.4xlarge
+        VCPUs: 16
+        RAM: 32GiB
+        IncludedScratch: 400GB
+        AddedScratch: 400GB
+        Price: 0.68
+      m54xlarge:
+        ProviderType: m5.4xlarge
+        VCPUs: 16
+        RAM: 64GiB
+        IncludedScratch: 400GB
+        AddedScratch: 400GB
+        Price: 0.768
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt.sls
new file mode 100644 (file)
index 0000000..6ba8b9b
--- /dev/null
@@ -0,0 +1,30 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  use_package: true
+  pkgs:
+    - certbot: latest
+    - python3-certbot-nginx
+  config:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: __INITIAL_USER_EMAIL__
+    authenticator: nginx
+    webroot-path: /var/www
+    agree-tos: true
+    keep-until-expiring: true
+    expand: true
+    max-log-backups: 0
+    deploy-hook: systemctl reload nginx
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    ### LETSENCRYPT DEFAULT PATH
+    letsencrypt_well_known.conf:
+      - location /.well-known:
+        - root: /var/www
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls
new file mode 100644 (file)
index 0000000..68c8512
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    __CLUSTER__.__DOMAIN__:
+      - __CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    __CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls
new file mode 100644 (file)
index 0000000..3056b89
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    keep.__CLUSTER__.__DOMAIN__:
+      - keep.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    keep.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/keep.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/keep.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls
new file mode 100644 (file)
index 0000000..dc34ea6
--- /dev/null
@@ -0,0 +1,23 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    download.__CLUSTER__.__DOMAIN__:
+      - download.__CLUSTER__.__DOMAIN__
+    collections.__CLUSTER__.__DOMAIN__:
+      - collections.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    download.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/download.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/download.__CLUSTER__.__DOMAIN__/privkey.pem
+    collections.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/collections.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/collections.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls
new file mode 100644 (file)
index 0000000..e9d2bb0
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    webshell.__CLUSTER__.__DOMAIN__:
+      - webshell.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    webshell.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/webshell.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/webshell.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls
new file mode 100644 (file)
index 0000000..d24431f
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    ws.__CLUSTER__.__DOMAIN__:
+      - ws.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    ws.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/ws.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/ws.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls
new file mode 100644 (file)
index 0000000..5aa6342
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    workbench2.__CLUSTER__.__DOMAIN__:
+      - workbench2.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    workbench2.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/workbench2.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/workbench2.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls
new file mode 100644 (file)
index 0000000..4620f79
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+  domainsets:
+    workbench.__CLUSTER__.__DOMAIN__:
+      - workbench.__CLUSTER__.__DOMAIN__
+
+### NGINX
+nginx:
+  ### SNIPPETS
+  snippets:
+    workbench.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
+      - ssl_certificate: /etc/letsencrypt/live/workbench.__CLUSTER__.__DOMAIN__/fullchain.pem
+      - ssl_certificate_key: /etc/letsencrypt/live/workbench.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls
new file mode 100644 (file)
index 0000000..c0b0870
--- /dev/null
@@ -0,0 +1,28 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      arvados_api:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: 'localhost:8004'
+            - server_name: api
+            - root: /var/www/arvados-api/current/public
+            - index:  index.html index.htm
+            - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log
+            - passenger_enabled: 'on'
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
new file mode 100644 (file)
index 0000000..3be1696
--- /dev/null
@@ -0,0 +1,61 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        'geo $external_client':
+          default: 1
+          '127.0.0.0/8': 0
+          '__CLUSTER_INT_CIDR__': 0
+        upstream controller_upstream:
+          - server: 'localhost:8003  fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_controller_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __CLUSTER__.__DOMAIN__
+            - listen:
+              - 80 default
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_controller_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-__CLUSTER__.__DOMAIN__-__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: __CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://controller_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_set_header: 'X-External-Client $external_client'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.error.log
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls
new file mode 100644 (file)
index 0000000..5d8b37e
--- /dev/null
@@ -0,0 +1,59 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream keepproxy_upstream:
+          - server: 'localhost:25107 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_keepproxy_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: keep.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_keepproxy_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-keep.__CLUSTER__.__DOMAIN__-keep.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: keep.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://keepproxy_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/keep.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls
new file mode 100644 (file)
index 0000000..fca4216
--- /dev/null
@@ -0,0 +1,89 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream collections_downloads_upstream:
+          - server: 'localhost:9002 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_collections_download_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: '~^((.*--)?collections|download)\.__CLUSTER__\.__DOMAIN__'
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      ### COLLECTIONS
+      arvados_collections_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-collections.__CLUSTER__.__DOMAIN__-collections.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: '~^(.*--)?collections\.__CLUSTER__\.__DOMAIN__'
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://collections_downloads_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_max_body_size: 0
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/collections.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
+
+      ### DOWNLOAD
+      arvados_download_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-download.__CLUSTER__.__DOMAIN__-download.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: download.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://collections_downloads_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_max_body_size: 0
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/download.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_passenger.sls
new file mode 100644 (file)
index 0000000..a2df3ff
--- /dev/null
@@ -0,0 +1,53 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  install_from_phusionpassenger: true
+  lookup:
+    passenger_package: libnginx-mod-http-passenger
+    passenger_config_file: /etc/nginx/conf.d/mod-http-passenger.conf
+
+  ### SNIPPETS
+  snippets:
+    # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4
+    ssl_hardening_default.conf:
+      - ssl_session_timeout: 1d
+      - ssl_session_cache: 'shared:arvadosSSL:10m'
+      - ssl_session_tickets: 'off'
+
+      # intermediate configuration
+      - ssl_protocols: TLSv1.2 TLSv1.3
+      - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+      - ssl_prefer_server_ciphers: 'off'
+
+      # HSTS (ngx_http_headers_module is required) (63072000 seconds)
+      - add_header: 'Strict-Transport-Security "max-age=63072000" always'
+
+      # OCSP stapling
+      - ssl_stapling: 'on'
+      - ssl_stapling_verify: 'on'
+
+      # verify chain of trust of OCSP response using Root CA and Intermediate certs
+      # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates
+
+      # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
+      # - ssl_dhparam: /path/to/dhparam
+
+      # replace with the IP address of your resolver
+      # - resolver: 127.0.0.1
+
+  ### SERVER
+  server:
+    config:
+      include: 'modules-enabled/*.conf'
+      worker_processes: 4
+
+  ### SITES
+  servers:
+    managed:
+      # Remove default webserver
+      default:
+        enabled: false
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls
new file mode 100644 (file)
index 0000000..46f8ad0
--- /dev/null
@@ -0,0 +1,76 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream webshell_upstream:
+          - server: 'localhost:4200 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      arvados_webshell_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: webshell.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_webshell_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-webshell.__CLUSTER__.__DOMAIN__-webshell.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: webshell.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /shell.__CLUSTER__.__DOMAIN__:
+              - proxy_pass: 'http://webshell_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_ssl_session_reuse: 'off'
+
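+              # Note (added for clarity): the conditional blocks below handle
+              # CORS for the web shell: OPTIONS preflight requests are answered
+              # directly with 204 plus the Access-Control headers, while GET
+              # and POST responses get the same headers attached.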
+              - "if ($request_method = 'OPTIONS')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+                - add_header: "'Access-Control-Max-Age' 1728000"
+                - add_header: "'Content-Type' 'text/plain charset=UTF-8'"
+                - add_header: "'Content-Length' 0"
+                - return: 204
+
+              - "if ($request_method = 'POST')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+              - "if ($request_method = 'GET')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/webshell.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
+
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls
new file mode 100644 (file)
index 0000000..e89b780
--- /dev/null
@@ -0,0 +1,60 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream websocket_upstream:
+          - server: 'localhost:8005 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_websocket_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: ws.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_websocket_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-ws.__CLUSTER__.__DOMAIN__-ws.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: ws.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://websocket_upstream'
+              - proxy_read_timeout: 600
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: 'Host $host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'Upgrade $http_upgrade'
+              - proxy_set_header: 'Connection "upgrade"'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/ws.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls
new file mode 100644 (file)
index 0000000..a3e58e2
--- /dev/null
@@ -0,0 +1,50 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_workbench2_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench2.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_workbench2_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-workbench2.__CLUSTER__.__DOMAIN__-workbench2.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: workbench2.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - root: /var/www/arvados-workbench2/workbench2
+              - try_files: '$uri $uri/ /index.html'
+              - 'if (-f $document_root/maintenance.html)':
+                - return: 503
+            - location /config.json:
+              - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__"}' ~ "'" }}
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/workbench2.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls
new file mode 100644 (file)
index 0000000..38e59cc
--- /dev/null
@@ -0,0 +1,75 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream workbench_upstream:
+          - server: 'localhost:9000 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_workbench_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - include: snippets/letsencrypt_well_known.conf
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_workbench_ssl:
+        enabled: true
+        overwrite: true
+        requires:
+          cmd: create-initial-cert-workbench.__CLUSTER__.__DOMAIN__-workbench.__CLUSTER__.__DOMAIN__
+        config:
+          - server:
+            - server_name: workbench.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://workbench_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+            - include: snippets/ssl_hardening_default.conf
+            - include: snippets/workbench.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
+
+      arvados_workbench_upstream:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: 'localhost:9000'
+            - server_name: workbench
+            - root: /var/www/arvados-workbench/current/public
+            - index: index.html index.htm
+            - passenger_enabled: 'on'
+            # yamllint disable-line rule:line-length
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
new file mode 100644 (file)
index 0000000..a0da9a1
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### POSTGRESQL
+postgres:
+  use_upstream_repo: true
+  version: '11'
+  postgresconf: |-
+    listen_addresses = '*'  # listen on all interfaces
+  acls:
+    - ['local', 'all', 'postgres', 'peer']
+    - ['local', 'all', 'all', 'peer']
+    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']
+    - ['host', 'all', 'all', '::1/128', 'md5']
+    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']
+    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '__CONTROLLER_INT_IP__/32']
+  users:
+    __CLUSTER___arvados:
+      ensure: present
+      password: __DATABASE_PASSWORD__
+
+  # tablespaces:
+  #   arvados_tablespace:
+  #     directory: /path/to/some/tbspace/arvados_tbsp
+  #     owner: arvados
+
+  databases:
+    __CLUSTER___arvados:
+      owner: __CLUSTER___arvados
+      template: template0
+      lc_ctype: en_US.utf8
+      lc_collate: en_US.utf8
+      # tablespace: arvados_tablespace
+      schemas:
+        public:
+          owner: __CLUSTER___arvados
+      extensions:
+        pg_trgm:
+          if_not_exists: true
+          schema: public
diff --git a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
new file mode 100644 (file)
index 0000000..82fb6f4
--- /dev/null
@@ -0,0 +1,71 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- set curr_tpldir = tpldir %}
+{%- set tpldir = 'arvados' %}
+{%- from "arvados/map.jinja" import arvados with context %}
+{%- set tpldir = curr_tpldir %}
+
+# CRUDE, but functional
+extra_extra_hosts_entries_etc_hosts_database_host_present:
+  host.present:
+    - ip: __DATABASE_INT_IP__
+    - names:
+      - db.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      - database.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_api_host_present:
+  host.present:
+    - ip: __CONTROLLER_INT_IP__
+    - names:
+      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_websocket_host_present:
+  host.present:
+    - ip: __CONTROLLER_INT_IP__
+    - names:
+      - ws.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_workbench_host_present:
+  host.present:
+    - ip: __WORKBENCH1_INT_IP__
+    - names:
+      - workbench.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_workbench2_host_present:
+  host.present:
+    - ip: __WORKBENCH1_INT_IP__
+    - names:
+      - workbench2.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_keepproxy_host_present:
+  host.present:
+    - ip: __KEEP_INT_IP__
+    - names:
+      - keep.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_keepweb_host_present:
+  host.present:
+    - ip: __KEEP_INT_IP__
+    - names:
+      - download.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      - collections.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_shell_host_present:
+  host.present:
+    - ip: __WEBSHELL_INT_IP__
+    - names:
+      - shell.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_keep0_host_present:
+  host.present:
+    - ip: __KEEPSTORE0_INT_IP__
+    - names:
+      - keep0.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+
+extra_extra_hosts_entries_etc_hosts_keep1_host_present:
+  host.present:
+    - ip: __KEEPSTORE1_INT_IP__
+    - names:
+      - keep1.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/README.md b/tools/salt-install/config_examples/single_host/multiple_hostnames/README.md
new file mode 100644 (file)
index 0000000..17ca89a
--- /dev/null
@@ -0,0 +1,20 @@
+Single host with multiple hostnames
+===================================
+
+These files let you set up Arvados on a single host, using a different
+hostname for each of its components' nginx virtual hosts.
+
+The hostnames are composed from the "CLUSTER" and "DOMAIN" variables set in
+the `local.params` file (see the example after the list below).
+
+The virtual hosts' hostnames that will be used are:
+
+* CLUSTER.DOMAIN
+* collections.CLUSTER.DOMAIN
+* download.CLUSTER.DOMAIN
+* keep.CLUSTER.DOMAIN
+* keep0.CLUSTER.DOMAIN
+* webshell.CLUSTER.DOMAIN
+* workbench.CLUSTER.DOMAIN
+* workbench2.CLUSTER.DOMAIN
+* ws.CLUSTER.DOMAIN
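+
+For example, assuming `CLUSTER=arva2` and `DOMAIN=arv.local` (sample values,
+not defaults), the resulting virtual hosts would be:
+
+    arva2.arv.local
+    collections.arva2.arv.local
+    download.arva2.arv.local
+    keep.arva2.arv.local
+    keep0.arva2.arv.local
+    webshell.arva2.arv.local
+    workbench.arva2.arv.local
+    workbench2.arva2.arv.local
+    ws.arva2.arv.local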
similarity index 67%
rename from tools/salt-install/single_host/arvados.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
index a06244270c237150f159220fffaab4de1a9f2f19..23e007650480ab28414b5bbbd4251cd655e75f3b 100644 (file)
@@ -63,10 +63,10 @@ arvados:
     database:
       # max concurrent connections per arvados server daemon
       # connection_pool_max: 32
-      name: arvados
+      name: __CLUSTER___arvados
       host: 127.0.0.1
-      password: changeme_arvados
-      user: arvados
+      password: "__DATABASE_PASSWORD__"
+      user: __CLUSTER___arvados
       encoding: en_US.utf8
       client_encoding: UTF8
 
@@ -78,19 +78,14 @@ arvados:
 
     ### TOKENS
     tokens:
-      system_root: changemesystemroottoken
-      management: changememanagementtoken
-      rails_secret: changemerailssecrettoken
-      anonymous_user: changemeanonymoususertoken
+      system_root: __SYSTEM_ROOT_TOKEN__
+      management: __MANAGEMENT_TOKEN__
+      anonymous_user: __ANONYMOUS_USER_TOKEN__
 
     ### KEYS
     secrets:
-      blob_signing_key: changemeblobsigningkey
-      workbench_secret_key: changemeworkbenchsecretkey
-      dispatcher_access_key: changemedispatcheraccesskey
-      dispatcher_secret_key: changeme_dispatchersecretkey
-      keep_access_key: changemekeepaccesskey
-      keep_secret_key: changemekeepsecretkey
+      blob_signing_key: __BLOB_SIGNING_KEY__
+      workbench_secret_key: __WORKBENCH_SECRET_KEY__
 
     Login:
       Test:
@@ -107,7 +102,7 @@ arvados:
       # <cluster>-nyw5e-<volume>
       __CLUSTER__-nyw5e-000000000000000:
         AccessViaHosts:
-          http://keep0.__CLUSTER__.__DOMAIN__:25107:
+          'http://keep0.__CLUSTER__.__DOMAIN__:25107':
             ReadOnly: false
         Replication: 2
         Driver: Directory
@@ -122,38 +117,38 @@ arvados:
 
     Services:
       Controller:
-        ExternalURL: https://__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
         InternalURLs:
-          http://controller.internal:8003: {}
+          'http://controller.internal:8003': {}
       DispatchCloud:
         InternalURLs:
-          http://__CLUSTER__.__DOMAIN__:9006: {}
+          'http://__CLUSTER__.__DOMAIN__:9006': {}
       Keepbalance:
         InternalURLs:
-          http://__CLUSTER__.__DOMAIN__:9005: {}
+          'http://__CLUSTER__.__DOMAIN__:9005': {}
       Keepproxy:
-        ExternalURL: https://keep.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://keep.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
         InternalURLs:
-          http://keep.internal:25100: {}
+          'http://keep.internal:25100': {}
       Keepstore:
         InternalURLs:
-          http://keep0.__CLUSTER__.__DOMAIN__:25107: {}
+          'http://keep0.__CLUSTER__.__DOMAIN__:25107': {}
       RailsAPI:
         InternalURLs:
-          http://api.internal:8004: {}
+          'http://api.internal:8004': {}
       WebDAV:
-        ExternalURL: https://collections.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://collections.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
         InternalURLs:
-          http://collections.internal:9002: {}
+          'http://collections.internal:9002': {}
       WebDAVDownload:
-        ExternalURL: https://download.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://download.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
       WebShell:
-        ExternalURL: https://webshell.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://webshell.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
       Websocket:
-        ExternalURL: wss://ws.__CLUSTER__.__DOMAIN__/websocket
+        ExternalURL: 'wss://ws.__CLUSTER__.__DOMAIN__/websocket'
         InternalURLs:
-          http://ws.internal:8005: {}
+          'http://ws.internal:8005': {}
       Workbench1:
-        ExternalURL: https://workbench.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://workbench.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
       Workbench2:
-        ExternalURL: https://workbench2.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        ExternalURL: 'https://workbench2.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/docker.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/docker.sls
new file mode 100644 (file)
index 0000000..54d2256
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+docker:
+  pkg:
+    docker:
+      use_upstream: package
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/locale.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/locale.sls
new file mode 100644 (file)
index 0000000..17f53a2
--- /dev/null
@@ -0,0 +1,14 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+locale:
+  present:
+    - "en_US.UTF-8 UTF-8"
+  default:
+    # Note: On Debian systems, don't write the second 'UTF-8' here or you will
+    # experience Salt problems like: LookupError: unknown encoding: utf_8_utf_8
+    # Restart the minion after correcting this!
+    name: 'en_US.UTF-8'
+    requires: 'en_US.UTF-8 UTF-8'
similarity index 97%
rename from tools/salt-install/single_host/nginx_controller_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls
index 00c3b3a13e6d10f04a4f677e02d86913e3289f17..3adf0580a43647e2919b37c796b39b79d89001e4 100644 (file)
@@ -40,7 +40,7 @@ nginx:
           - server:
             - server_name: __CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://controller_upstream'
similarity index 97%
rename from tools/salt-install/single_host/nginx_keepproxy_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls
index 6554f79a7c44d1f66ac17ce4e4d4b9db4ff7d2e2..2d8922df9a8c727768bd4d57d69c6adab5c0fef3 100644 (file)
@@ -36,7 +36,7 @@ nginx:
           - server:
             - server_name: keep.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://keepproxy_upstream'
similarity index 97%
rename from tools/salt-install/single_host/nginx_keepweb_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
index cc871b9da14af308163348d85b4a0afe69b6be24..d180a3bad42e974d7d1796673e5f04df5a94e3ae 100644 (file)
@@ -37,7 +37,7 @@ nginx:
           - server:
             - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://collections_downloads_upstream'
similarity index 98%
rename from tools/salt-install/single_host/nginx_webshell_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls
index a0756b7ce5504df125225017bf16edc3422ef6b3..e75f0443434285785b2b5444f83524f6d94058a5 100644 (file)
@@ -37,7 +37,7 @@ nginx:
           - server:
             - server_name: webshell.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /shell.__CLUSTER__.__DOMAIN__:
               - proxy_pass: 'http://webshell_upstream'
similarity index 97%
rename from tools/salt-install/single_host/nginx_websocket_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls
index ebe03f733745b1f168822deb3171e45183bc13b9..3a354ac293de96d93faac2c9013750ac825287aa 100644 (file)
@@ -36,7 +36,7 @@ nginx:
           - server:
             - server_name: ws.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://websocket_upstream'
similarity index 93%
rename from tools/salt-install/single_host/nginx_workbench2_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls
index 8930be408cb0f56350ab3af1d1ab071530bf03b5..8fdd553991ed86be5d83adb056e12f6348a9bdee 100644 (file)
@@ -34,7 +34,7 @@ nginx:
           - server:
             - server_name: workbench2.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - root: /var/www/arvados-workbench2/workbench2
@@ -42,7 +42,7 @@ nginx:
               - 'if (-f $document_root/maintenance.html)':
                 - return: 503
             - location /config.json:
-              - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__"}' ~ "'" }}
+              - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__"}' ~ "'" }}
             - include: 'snippets/arvados-snakeoil.conf'
             - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
similarity index 97%
rename from tools/salt-install/single_host/nginx_workbench_configuration.sls
rename to tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls
index be571ca77e84ba6208175a431b051a8b72bd5926..649af10b6d8b5c497b5cde653df2aef2e86e0f6a 100644 (file)
@@ -43,7 +43,7 @@ nginx:
           - server:
             - server_name: workbench.__CLUSTER__.__DOMAIN__
             - listen:
-              - __HOST_SSL_PORT__ http2 ssl
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://workbench_upstream'
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
new file mode 100644 (file)
index 0000000..71e712c
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### POSTGRESQL
+postgres:
+  use_upstream_repo: false
+  pkgs_extra:
+    - postgresql-contrib
+  postgresconf: |-
+    listen_addresses = '*'  # listen on all interfaces
+  acls:
+    - ['local', 'all', 'postgres', 'peer']
+    - ['local', 'all', 'all', 'peer']
+    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']
+    - ['host', 'all', 'all', '::1/128', 'md5']
+    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']
+  users:
+    __CLUSTER___arvados:
+      ensure: present
+      password: __DATABASE_PASSWORD__
+
+  # tablespaces:
+  #   arvados_tablespace:
+  #     directory: /path/to/some/tbspace/arvados_tbsp
+  #     owner: arvados
+
+  databases:
+    __CLUSTER___arvados:
+      owner: __CLUSTER___arvados
+      template: template0
+      lc_ctype: en_US.utf8
+      lc_collate: en_US.utf8
+      # tablespace: arvados_tablespace
+      schemas:
+        public:
+          owner: __CLUSTER___arvados
+      extensions:
+        pg_trgm:
+          if_not_exists: true
+          schema: public
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls
new file mode 100644 (file)
index 0000000..379f476
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- set curr_tpldir = tpldir %}
+{%- set tpldir = 'arvados' %}
+{%- from "arvados/map.jinja" import arvados with context %}
+{%- set tpldir = curr_tpldir %}
+
+arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
+  host.present:
+    - ip: 127.0.1.1
+    - names:
+      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      # FIXME! This only works for our testing.
+      # It won't work if the cluster name != host name.
+      {%- for entry in [
+          'api',
+          'collections',
+          'controller',
+          'download',
+          'keep',
+          'keepweb',
+          'keep0',
+          'shell',
+          'workbench',
+          'workbench2',
+          'ws',
+        ]
+      %}
+      - {{ entry }}
+      - {{ entry }}.internal
+      - {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      {%- endfor %}
+    - require_in:
+      - file: nginx_config
+      - service: nginx_service
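+
+# For reference: with the sample values name=arva2 and domain=arv.local, the
+# state above would render an /etc/hosts entry along the lines of:
+#   127.0.1.1 arva2.arv.local api api.internal api.arva2.arv.local
+#   (and so on for each entry in the loop)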
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
new file mode 100644 (file)
index 0000000..466d41d
--- /dev/null
@@ -0,0 +1,158 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- set curr_tpldir = tpldir %}
+{%- set tpldir = 'arvados' %}
+{%- from "arvados/map.jinja" import arvados with context %}
+{%- set tpldir = curr_tpldir %}
+
+{%- set arvados_ca_cert_file = '/etc/ssl/certs/arvados-snakeoil-ca.pem' %}
+{%- set arvados_ca_key_file = '/etc/ssl/private/arvados-snakeoil-ca.key' %}
+{%- set arvados_cert_file = '/etc/ssl/certs/arvados-snakeoil-cert.pem' %}
+{%- set arvados_csr_file = '/etc/ssl/private/arvados-snakeoil-cert.csr' %}
+{%- set arvados_key_file = '/etc/ssl/private/arvados-snakeoil-cert.key' %}
+
+{%- if grains.get('os_family') == 'Debian' %}
+  {%- set arvados_ca_cert_dest = '/usr/local/share/ca-certificates/arvados-snakeoil-ca.crt' %}
+  {%- set update_ca_cert = '/usr/sbin/update-ca-certificates' %}
+  {%- set openssl_conf = '/etc/ssl/openssl.cnf' %}
+{%- else %}
+  {%- set arvados_ca_cert_dest = '/etc/pki/ca-trust/source/anchors/arvados-snakeoil-ca.pem' %}
+  {%- set update_ca_cert = '/usr/bin/update-ca-trust' %}
+  {%- set openssl_conf = '/etc/pki/tls/openssl.cnf' %}
+{%- endif %}
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed:
+  pkg.installed:
+    - pkgs:
+      - openssl
+      - ca-certificates
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run:
+  # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
+  cmd.run:
+    - name: |
+        # These dirs are not too CentOS-ish, but this is a helper script
+        # and they should be enough.
+        mkdir -p /etc/ssl/certs/ /etc/ssl/private/ && \
+        openssl req \
+          -new \
+          -nodes \
+          -sha256 \
+          -x509 \
+          -subj "/C=CC/ST=Some State/O=Arvados Formula/OU=arvados-formula/CN=snakeoil-ca-{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}" \
+          -extensions x509_ext \
+          -config <(cat {{ openssl_conf }} \
+                  <(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
+          -out {{ arvados_ca_cert_file }} \
+          -keyout {{ arvados_ca_key_file }} \
+          -days 365 && \
+        cp {{ arvados_ca_cert_file }} {{ arvados_ca_cert_dest }} && \
+        {{ update_ca_cert }}
+    - unless:
+      - test -f {{ arvados_ca_cert_file }}
+      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_ca_cert_file }}
+    - require:
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run:
+  cmd.run:
+    - name: |
+        cat > /tmp/openssl.cnf <<-CNF
+        [req]
+        default_bits = 2048
+        prompt = no
+        default_md = sha256
+        req_extensions = rext
+        distinguished_name = dn
+        [dn]
+        C   = CC
+        ST  = Some State
+        L   = Some Location
+        O   = Arvados Formula
+        OU  = arvados-formula
+        CN  = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        emailAddress = admin@{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        [rext]
+        subjectAltName = @alt_names
+        [alt_names]
+        {%- for entry in grains.get('ipv4') %}
+        IP.{{ loop.index }} = {{ entry }}
+        {%- endfor %}
+        {%- for entry in [
+            'keep',
+            'collections',
+            'download',
+            'ws',
+            'workbench',
+            'workbench2',
+          ]
+        %}
+        DNS.{{ loop.index }} = {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        {%- endfor %}
+        DNS.7 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        CNF
+
+        # The req
+        openssl req \
+          -config /tmp/openssl.cnf \
+          -new \
+          -nodes \
+          -sha256 \
+          -out {{ arvados_csr_file }} \
+          -keyout {{ arvados_key_file }} > /tmp/snake_oil_certs.output 2>&1 && \
+        # The cert
+        openssl x509 \
+          -req \
+          -days 365 \
+          -in {{ arvados_csr_file }} \
+          -out {{ arvados_cert_file }} \
+          -extfile /tmp/openssl.cnf \
+          -extensions rext \
+          -CA {{ arvados_ca_cert_file }} \
+          -CAkey {{ arvados_ca_key_file }} \
+          -set_serial $(date +%s) && \
+        chmod 0644 {{ arvados_cert_file }} && \
+        chmod 0640 {{ arvados_key_file }}
+    - unless:
+      - test -f {{ arvados_key_file }}
+      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_cert_file }}
+    - require:
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run
+
+{%- if grains.get('os_family') == 'Debian' %}
+arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed:
+  pkg.installed:
+    - name: ssl-cert
+    - require_in:
+      - sls: postgres
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run:
+  cmd.run:
+    - name: |
+        chown root:ssl-cert {{ arvados_key_file }}
+    - require:
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed
+{%- endif %}
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_nginx_snakeoil_file_managed:
+  file.managed:
+    - name: /etc/nginx/snippets/arvados-snakeoil.conf
+    - contents: |
+        ssl_certificate {{ arvados_cert_file }};
+        ssl_certificate_key {{ arvados_key_file }};
+    - watch_in:
+      - service: nginx_service
+    - require:
+      - pkg: passenger_install
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run
+    - require_in:
+      - file: nginx_config
+      - service: nginx_service
+
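+# A quick manual check (added note, assuming the default paths set above) to
+# confirm the generated certificate chains to the snakeoil CA:
+#   openssl verify -CAfile /etc/ssl/certs/arvados-snakeoil-ca.pem /etc/ssl/certs/arvados-snakeoil-cert.pem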
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/README.md b/tools/salt-install/config_examples/single_host/single_hostname/README.md
new file mode 100644 (file)
index 0000000..9c7ab96
--- /dev/null
@@ -0,0 +1,23 @@
+Single host with a single hostname
+==================================
+
+These files let you set up Arvados on a single host, using a single hostname
+for all of its components' nginx virtual hosts.
+
+The hostname MUST be given in the `local.params` file. The script won't try
+to guess it because, depending on the network architecture where you're
+installing Arvados, things might not work as expected.
+
+The services will be available on the same hostname but on different ports,
+which can be set in the `local.params` file or left at their defaults (see the
+sketch after this list). The services that will be exposed are:
+
+* CLUSTER.DOMAIN
+* collections
+* download
+* keep
+* keep0
+* webshell
+* workbench
+* workbench2
+* ws
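+
+A hypothetical `local.params` excerpt overriding the ports could look like
+this (the variable names mirror the placeholders used in these pillars, e.g.
+`__CONTROLLER_EXT_SSL_PORT__`; the port numbers are illustrative only):
+
+    HOSTNAME_EXT="host.example.com"
+    CONTROLLER_EXT_SSL_PORT=8800
+    KEEP_EXT_SSL_PORT=8801
+    KEEPWEB_EXT_SSL_PORT=8802
+    WEBSHELL_EXT_SSL_PORT=8803
+    WEBSOCKET_EXT_SSL_PORT=8804
+    WORKBENCH1_EXT_SSL_PORT=8805
+    WORKBENCH2_EXT_SSL_PORT=8806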
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
new file mode 100644 (file)
index 0000000..a45ac8d
--- /dev/null
@@ -0,0 +1,148 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# The variables commented out are the default values that the formula uses.
+# The uncommented values are REQUIRED values. If you don't set them, running
+# this formula will fail.
+arvados:
+  ### GENERAL CONFIG
+  version: '__VERSION__'
+  ## It makes little sense to disable this flag, but you can, if you want :)
+  # use_upstream_repo: true
+
+  ## Repo URL is built with grains values. If desired, it can be completely
+  ## overwritten with the pillar parameter 'repo_url'
+  # repo:
+  #   humanname: Arvados Official Repository
+
+  release: __RELEASE__
+
+  ## IMPORTANT!!!!!
+  ## api, workbench and shell require some gems, so you need to make sure ruby
+  ## and deps are installed in order to install and compile the gems.
+  ## We default to `false` in these two variables as it's expected you already
+  ## manage OS packages with some other tool and you don't want us messing
+  ## with your setup.
+  ruby:
+    ## We set these to `true` here for testing purposes.
+    ## They both default to `false`.
+    manage_ruby: true
+    manage_gems_deps: true
+    # pkg: ruby
+    # gems_deps:
+    #     - curl
+    #     - g++
+    #     - gcc
+    #     - git
+    #     - libcurl4
+    #     - libcurl4-gnutls-dev
+    #     - libpq-dev
+    #     - libxml2
+    #     - libxml2-dev
+    #     - make
+    #     - python3-dev
+    #     - ruby-dev
+    #     - zlib1g-dev
+
+  # config:
+  #   file: /etc/arvados/config.yml
+  #   user: root
+  ## IMPORTANT!!!!!
+  ## If you're installing any of the Rails apps (api, workbench), the group
+  ## should be set to that of the web server, usually `www-data`
+  #   group: root
+  #   mode: 640
+
+  ### ARVADOS CLUSTER CONFIG
+  cluster:
+    name: __CLUSTER__
+    domain: __DOMAIN__
+
+    database:
+      # max concurrent connections per arvados server daemon
+      # connection_pool_max: 32
+      name: __CLUSTER___arvados
+      host: 127.0.0.1
+      password: "__DATABASE_PASSWORD__"
+      user: __CLUSTER___arvados
+      encoding: en_US.utf8
+
+    tls:
+      # certificate: ''
+      # key: ''
+      # required to test with arvados-snakeoil certs
+      insecure: true
+
+    ### TOKENS
+    tokens:
+      system_root: __SYSTEM_ROOT_TOKEN__
+      management: __MANAGEMENT_TOKEN__
+      anonymous_user: __ANONYMOUS_USER_TOKEN__
+      rails_secret: YDLxHf4GqqmLXYAMgndrAmFEdqgC0sBqX7TEjMN2rw9D6EVwgx
+
+    ### KEYS
+    secrets:
+      blob_signing_key: __BLOB_SIGNING_KEY__
+      workbench_secret_key: __WORKBENCH_SECRET_KEY__
+
+    Login:
+      Test:
+        Enable: true
+        Users:
+          __INITIAL_USER__:
+            Email: __INITIAL_USER_EMAIL__
+            Password: __INITIAL_USER_PASSWORD__
+
+    ### VOLUMES
+    ## This should usually match all your `keepstore` instances
+    Volumes:
+      # the volume name will be composed with
+      # <cluster>-nyw5e-<volume>
+      __CLUSTER__-nyw5e-000000000000000:
+        AccessViaHosts:
+          'http://__HOSTNAME_INT__:25107':
+            ReadOnly: false
+        Replication: 2
+        Driver: Directory
+        DriverParameters:
+          Root: /tmp
+
+    Users:
+      NewUsersAreActive: true
+      AutoAdminFirstUser: true
+      AutoSetupNewUsers: true
+      AutoSetupNewUsersWithRepository: true
+
+    Services:
+      Controller:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__'
+        InternalURLs:
+          'http://__HOSTNAME_INT__:8003': {}
+      Keepproxy:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEP_EXT_SSL_PORT__'
+        InternalURLs:
+          'http://__HOSTNAME_INT__:25100': {}
+      Keepstore:
+        InternalURLs:
+          'http://__HOSTNAME_INT__:25107': {}
+      RailsAPI:
+        InternalURLs:
+          'http://__HOSTNAME_INT__:8004': {}
+      WebDAV:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
+        InternalURLs:
+          'http://__HOSTNAME_INT__:9003': {}
+      WebDAVDownload:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
+      WebShell:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__WEBSHELL_EXT_SSL_PORT__'
+      Websocket:
+        ExternalURL: 'wss://__HOSTNAME_EXT__:__WEBSOCKET_EXT_SSL_PORT__/websocket'
+        InternalURLs:
+          'http://__HOSTNAME_INT__:8005': {}
+      Workbench1:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH1_EXT_SSL_PORT__'
+      Workbench2:
+        ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH2_EXT_SSL_PORT__'
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/docker.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/docker.sls
new file mode 100644 (file)
index 0000000..54d2256
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+docker:
+  pkg:
+    docker:
+      use_upstream: package
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/locale.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/locale.sls
new file mode 100644 (file)
index 0000000..17f53a2
--- /dev/null
@@ -0,0 +1,14 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+locale:
+  present:
+    - "en_US.UTF-8 UTF-8"
+  default:
+    # Note: On Debian systems, don't write the second 'UTF-8' here or you will
+    # experience Salt problems like: LookupError: unknown encoding: utf_8_utf_8
+    # Restart the minion after correcting this!
+    name: 'en_US.UTF-8'
+    requires: 'en_US.UTF-8 UTF-8'
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_api_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_api_configuration.sls
new file mode 100644 (file)
index 0000000..18f09af
--- /dev/null
@@ -0,0 +1,28 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      arvados_api:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: '__HOSTNAME_INT__:8004'
+            - server_name: api
+            - root: /var/www/arvados-api/current/public
+            - index: index.html index.htm
+            - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log
+            - passenger_enabled: 'on'
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_controller_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_controller_configuration.sls
new file mode 100644 (file)
index 0000000..b7b75ab
--- /dev/null
@@ -0,0 +1,58 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        'geo $external_client':
+          default: 1
+          '127.0.0.0/8': 0
+        upstream controller_upstream:
+          - server: '__HOSTNAME_INT__:8003  fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_controller_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: _
+            - listen:
+              - 80 default_server
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_controller_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl default_server
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://controller_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_set_header: 'X-External-Client $external_client'
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.error.log
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepproxy_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepproxy_configuration.sls
new file mode 100644 (file)
index 0000000..81d72aa
--- /dev/null
@@ -0,0 +1,43 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream keepproxy_upstream:
+          - server: '__HOSTNAME_INT__:25100 fail_timeout=10s'
+
+  servers:
+    managed:
+      arvados_keepproxy_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __KEEP_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://keepproxy_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepweb_configuration.sls
new file mode 100644 (file)
index 0000000..fcb56c9
--- /dev/null
@@ -0,0 +1,43 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream collections_downloads_upstream:
+          - server: '__HOSTNAME_INT__:9003 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### COLLECTIONS / DOWNLOAD
+      arvados_collections_download_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://collections_downloads_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_max_body_size: 0
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/keepweb.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/keepweb.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
new file mode 100644 (file)
index 0000000..6ce75fa
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  install_from_phusionpassenger: true
+  lookup:
+    passenger_package: libnginx-mod-http-passenger
+    passenger_config_file: /etc/nginx/conf.d/mod-http-passenger.conf
+
+  ### SERVER
+  server:
+    config:
+      include: 'modules-enabled/*.conf'
+      worker_processes: 4
+
+  ### SITES
+  servers:
+    managed:
+      # Remove default webserver
+      default:
+        enabled: false
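A minimal check that the passenger module landed where this pillar expects it (an assumption about the formula's behavior, not a guarantee):

    # Confirms the module config exists and the overall nginx config parses
    test -f /etc/nginx/conf.d/mod-http-passenger.conf && nginx -t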
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_webshell_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_webshell_configuration.sls
new file mode 100644 (file)
index 0000000..1b21aaa
--- /dev/null
@@ -0,0 +1,61 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream webshell_upstream:
+          - server: '__HOSTNAME_INT__:4200 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      arvados_webshell_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __WEBSHELL_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /__HOSTNAME_EXT__:
+              - proxy_pass: 'http://webshell_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_ssl_session_reuse: 'off'
+
+              - "if ($request_method = 'OPTIONS')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+                - add_header: "'Access-Control-Max-Age' 1728000"
+                - add_header: "'Content-Type' 'text/plain charset=UTF-8'"
+                - add_header: "'Content-Length' 0"
+                - return: 204
+
+              - "if ($request_method = 'POST')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+              - "if ($request_method = 'GET')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
+
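The three `if` blocks above implement CORS for the web shell client; a preflight can be exercised by hand (a sketch using the document's placeholders; expect a 204 plus the Access-Control-Allow-* headers from the OPTIONS block):

    curl -k -i -X OPTIONS \
      -H 'Origin: https://example.org' \
      -H 'Access-Control-Request-Method: POST' \
      "https://__HOSTNAME_EXT__:__WEBSHELL_EXT_SSL_PORT__/__HOSTNAME_EXT__/"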
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_websocket_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_websocket_configuration.sls
new file mode 100644 (file)
index 0000000..7c4ff78
--- /dev/null
@@ -0,0 +1,44 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream websocket_upstream:
+          - server: '__HOSTNAME_INT__:8005 fail_timeout=10s'
+
+  servers:
+    managed:
+      arvados_websocket_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __WEBSOCKET_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://websocket_upstream'
+              - proxy_read_timeout: 600
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: 'Host $host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'Upgrade $http_upgrade'
+              - proxy_set_header: 'Connection "upgrade"'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
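The Upgrade/Connection headers and the longer proxy_read_timeout are what let the WebSocket handshake through; a raw probe looks roughly like this (a sketch; the /websocket path and the dummy key are assumptions):

    curl -k -i \
      -H 'Connection: Upgrade' -H 'Upgrade: websocket' \
      -H 'Sec-WebSocket-Version: 13' -H 'Sec-WebSocket-Key: SGVsbG8sIHdvcmxkIQ==' \
      "https://__HOSTNAME_EXT__:__WEBSOCKET_EXT_SSL_PORT__/websocket"
    # Anything other than a plain 502 suggests nginx reached arvados-ws on port 8005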
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench2_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench2_configuration.sls
new file mode 100644 (file)
index 0000000..462443c
--- /dev/null
@@ -0,0 +1,34 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      arvados_workbench2_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __WORKBENCH2_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - root: /var/www/arvados-workbench2/workbench2
+              - try_files: '$uri $uri/ /index.html'
+              - 'if (-f $document_root/maintenance.html)':
+                - return: 503
+            - location /config.json:
+              - return: {{ "200 '" ~ '{"API_HOST":"__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__"}' ~ "'" }}
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
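The /config.json location is how Workbench2 discovers the controller; the Jinja concatenation above renders to a literal `return 200 '{"API_HOST":...}'`, which can be verified directly:

    # Should print {"API_HOST":"__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__"}
    curl -k "https://__HOSTNAME_EXT__:__WORKBENCH2_EXT_SSL_PORT__/config.json"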
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench_configuration.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench_configuration.sls
new file mode 100644 (file)
index 0000000..9ed6e3b
--- /dev/null
@@ -0,0 +1,59 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream workbench_upstream:
+          - server: '__HOSTNAME_INT__:9000 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      arvados_workbench_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __HOSTNAME_EXT__
+            - listen:
+              - __WORKBENCH1_EXT_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://workbench_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+            - include: 'snippets/arvados-snakeoil.conf'
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
+
+      arvados_workbench_upstream:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: '__HOSTNAME_INT__:9000'
+            - server_name: workbench
+            - root: /var/www/arvados-workbench/current/public
+            - index:  index.html index.htm
+            - passenger_enabled: 'on'
+            # yamllint disable-line rule:line-length
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.error.log
similarity index 78%
rename from tools/salt-install/single_host/postgresql.sls
rename to tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls
index 56b0a42e8b96578a14e5898031c746a10c173b22..caafb7b2d784d480dfc726572825de456d128737 100644 (file)
@@ -15,11 +15,11 @@ postgres:
     - ['local', 'all', 'all', 'peer']
     - ['host', 'all', 'all', '127.0.0.1/32', 'md5']
     - ['host', 'all', 'all', '::1/128', 'md5']
-    - ['host', 'arvados', 'arvados', '127.0.0.1/32']
+    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.0/8']
   users:
-    arvados:
+    __CLUSTER___arvados:
       ensure: present
-      password: changeme_arvados
+      password: __DATABASE_PASSWORD__
 
   # tablespaces:
   #   arvados_tablespace:
@@ -27,15 +27,15 @@ postgres:
   #     owner: arvados
 
   databases:
-    arvados:
-      owner: arvados
+    __CLUSTER___arvados:
+      owner: __CLUSTER___arvados
       template: template0
       lc_ctype: en_US.utf8
       lc_collate: en_US.utf8
       # tablespace: arvados_tablespace
       schemas:
         public:
-          owner: arvados
+          owner: __CLUSTER___arvados
       extensions:
         pg_trgm:
           if_not_exists: true
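With the cluster-prefixed role and database in place, connectivity can be sanity-checked like this (a sketch; `xxxxx` stands for the actual 5-character cluster prefix):

    sudo -u postgres psql -tAc "SELECT rolname FROM pg_roles WHERE rolname = 'xxxxx_arvados'"
    # The 127.0.0.0/8 md5 rule above is what allows this client connection
    PGPASSWORD=__DATABASE_PASSWORD__ psql -h 127.0.0.1 -U xxxxx_arvados -d xxxxx_arvados -c '\dx pg_trgm'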
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls b/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls
new file mode 100644 (file)
index 0000000..53a9148
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- set curr_tpldir = tpldir %}
+{%- set tpldir = 'arvados' %}
+{%- from "arvados/map.jinja" import arvados with context %}
+{%- set tpldir = curr_tpldir %}
+
+arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
+  host.present:
+    - ip: 127.0.1.1
+    - names:
+      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      # FIXME! This only works for our testing.
+      # It won't work if the cluster name != host name
+      {%- for entry in [
+          'api',
+          'collections',
+          'controller',
+          'download',
+          'keep',
+          'keepweb',
+          'keep0',
+          'shell',
+          'workbench',
+          'workbench2',
+          'ws',
+        ]
+      %}
+      - {{ entry }}
+      - {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+      {%- endfor %}
+    - require_in:
+      - file: nginx_config
+      - service: nginx_service
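The state above collapses every service name onto 127.0.1.1; the effect can be confirmed through the resolver (example output assumes cluster `arva2` and domain `arv.local`):

    getent hosts workbench2.arva2.arv.local
    # 127.0.1.1  arva2.arv.local api api.arva2.arv.local ... ws ws.arva2.arv.local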
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls b/tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls
new file mode 100644 (file)
index 0000000..d88adbc
--- /dev/null
@@ -0,0 +1,158 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- set curr_tpldir = tpldir %}
+{%- set tpldir = 'arvados' %}
+{%- from "arvados/map.jinja" import arvados with context %}
+{%- set tpldir = curr_tpldir %}
+
+{%- set arvados_ca_cert_file = '/etc/ssl/certs/arvados-snakeoil-ca.pem' %}
+{%- set arvados_ca_key_file = '/etc/ssl/private/arvados-snakeoil-ca.key' %}
+{%- set arvados_cert_file = '/etc/ssl/certs/arvados-snakeoil-cert.pem' %}
+{%- set arvados_csr_file = '/etc/ssl/private/arvados-snakeoil-cert.csr' %}
+{%- set arvados_key_file = '/etc/ssl/private/arvados-snakeoil-cert.key' %}
+
+{%- if grains.get('os_family') == 'Debian' %}
+  {%- set arvados_ca_cert_dest = '/usr/local/share/ca-certificates/arvados-snakeoil-ca.crt' %}
+  {%- set update_ca_cert = '/usr/sbin/update-ca-certificates' %}
+  {%- set openssl_conf = '/etc/ssl/openssl.cnf' %}
+{%- else %}
+  {%- set arvados_ca_cert_dest = '/etc/pki/ca-trust/source/anchors/arvados-snakeoil-ca.pem' %}
+  {%- set update_ca_cert = '/usr/bin/update-ca-trust' %}
+  {%- set openssl_conf = '/etc/pki/tls/openssl.cnf' %}
+{%- endif %}
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed:
+  pkg.installed:
+    - pkgs:
+      - openssl
+      - ca-certificates
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run:
+  # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
+  cmd.run:
+    - name: |
+        # These dirs are not too CentOS-ish, but this is a helper script
+        # and they should be enough
+        mkdir -p /etc/ssl/certs/ /etc/ssl/private/ && \
+        openssl req \
+          -new \
+          -nodes \
+          -sha256 \
+          -x509 \
+          -subj "/C=CC/ST=Some State/O=Arvados Formula/OU=arvados-formula/CN=snakeoil-ca-{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}" \
+          -extensions x509_ext \
+          -config <(cat {{ openssl_conf }} \
+                  <(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
+          -out {{ arvados_ca_cert_file }} \
+          -keyout {{ arvados_ca_key_file }} \
+          -days 365 && \
+        cp {{ arvados_ca_cert_file }} {{ arvados_ca_cert_dest }} && \
+        {{ update_ca_cert }}
+    - unless:
+      - test -f {{ arvados_ca_cert_file }}
+      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_ca_cert_file }}
+    - require:
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run:
+  cmd.run:
+    - name: |
+        cat > /tmp/openssl.cnf <<-CNF
+        [req]
+        default_bits = 2048
+        prompt = no
+        default_md = sha256
+        req_extensions = rext
+        distinguished_name = dn
+        [dn]
+        C   = CC
+        ST  = Some State
+        L   = Some Location
+        O   = Arvados Formula
+        OU  = arvados-formula
+        CN  = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        emailAddress = admin@{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        [rext]
+        subjectAltName = @alt_names
+        [alt_names]
+        {%- for entry in grains.get('ipv4') %}
+        IP.{{ loop.index }} = {{ entry }}
+        {%- endfor %}
+        {%- for entry in [
+            'keep',
+            'collections',
+            'download',
+            'keepweb',
+            'ws',
+            'workbench',
+            'workbench2',
+          ]
+        %}
+        DNS.{{ loop.index }} = {{ entry }}
+        {%- endfor %}
+        DNS.8 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        DNS.9 = '__HOSTNAME_EXT__'
+        DNS.10 = '__HOSTNAME_INT__'
+        CNF
+
+        # The req
+        openssl req \
+          -config /tmp/openssl.cnf \
+          -new \
+          -nodes \
+          -sha256 \
+          -out {{ arvados_csr_file }} \
+          -keyout {{ arvados_key_file }} > /tmp/snake_oil_certs.output 2>&1 && \
+        # The cert
+        openssl x509 \
+          -req \
+          -days 365 \
+          -in {{ arvados_csr_file }} \
+          -out {{ arvados_cert_file }} \
+          -extfile /tmp/openssl.cnf \
+          -extensions rext \
+          -CA {{ arvados_ca_cert_file }} \
+          -CAkey {{ arvados_ca_key_file }} \
+          -set_serial $(date +%s) && \
+        chmod 0644 {{ arvados_cert_file }} && \
+        chmod 0640 {{ arvados_key_file }}
+    - unless:
+      - test -f {{ arvados_key_file }}
+      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_cert_file }}
+    - require:
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run
+
+{%- if grains.get('os_family') == 'Debian' %}
+arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed:
+  pkg.installed:
+    - name: ssl-cert
+    - require_in:
+      - sls: postgres
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run:
+  cmd.run:
+    - name: |
+        chown root:ssl-cert {{ arvados_key_file }}
+    - require:
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run
+      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed
+{%- endif %}
+
+arvados_test_salt_states_examples_single_host_snakeoil_certs_nginx_snakeoil_file_managed:
+  file.managed:
+    - name: /etc/nginx/snippets/arvados-snakeoil.conf
+    - contents: |
+        ssl_certificate {{ arvados_cert_file }};
+        ssl_certificate_key {{ arvados_key_file }};
+    - require:
+      - pkg: nginx_install
+    - require_in:
+      - file: nginx_config
+      - service: nginx_service
+    - watch_in:
+      - service: nginx_service
+
+
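Once both states have run, the chain can be re-checked with the same commands the `unless` clauses use, plus a SAN dump (the -ext flag assumes OpenSSL 1.1.1 or later):

    openssl verify -CAfile /etc/ssl/certs/arvados-snakeoil-ca.pem /etc/ssl/certs/arvados-snakeoil-cert.pem
    openssl x509 -in /etc/ssl/certs/arvados-snakeoil-cert.pem -noout -ext subjectAltName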
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
new file mode 100644 (file)
index 0000000..6a5bc17
--- /dev/null
@@ -0,0 +1,91 @@
+##########################################################
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# These are the basic parameters to configure the installation
+
+# The FIVE-ALPHANUMERIC-CHARACTER name you want to give your cluster
+CLUSTER="cluster_fixme_or_this_wont_work"
+
+# The domain name you want to give to your cluster's hosts
+DOMAIN="domain_fixme_or_this_wont_work"
+
+# The host SSL port your browser will use to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
+CONTROLLER_EXT_SSL_PORT=443
+KEEP_EXT_SSL_PORT=443
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=443
+WEBSHELL_EXT_SSL_PORT=443
+WEBSOCKET_EXT_SSL_PORT=443
+WORKBENCH1_EXT_SSL_PORT=443
+WORKBENCH2_EXT_SSL_PORT=443
+
+# Internal IPs for the configuration
+CLUSTER_INT_CIDR=10.0.0.0/16
+CONTROLLER_INT_IP=10.0.0.1
+WEBSOCKET_INT_IP=10.0.0.1
+KEEP_INT_IP=10.0.0.2
+KEEPSTORE0_INT_IP=10.0.0.3
+KEEPSTORE1_INT_IP=10.0.0.4
+# Both for collections and downloads
+KEEPWEB_INT_IP=10.0.0.5
+WEBSHELL_INT_IP=10.0.0.6
+WORKBENCH1_INT_IP=10.0.0.7
+WORKBENCH2_INT_IP=10.0.0.7
+DATABASE_INT_IP=10.0.0.8
+
+INITIAL_USER="admin"
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
+INITIAL_USER_PASSWORD="password"
+
+# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS (see the generator sketch after this file)
+BLOB_SIGNING_KEY=blobsigningkeymusthaveatleast32characters
+MANAGEMENT_TOKEN=managementtokenmusthaveatleast32characters
+SYSTEM_ROOT_TOKEN=systemroottokenmusthaveatleast32characters
+ANONYMOUS_USER_TOKEN=anonymoususertokenmusthaveatleast32characters
+WORKBENCH_SECRET_KEY=workbenchsecretkeymusthaveatleast32characters
+DATABASE_PASSWORD=please_set_this_to_some_secure_value
+
+# SSL CERTIFICATES
+# Arvados REQUIRES valid SSL to work correctly. Otherwise, some components will fail
+# to communicate and can silently drop traffic. You can use the Letsencrypt
+# salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try
+# to automatically obtain and install SSL certificates for your instances, or set
+# this variable to "no", provide and upload your own certificates to the instances,
+# and modify the 'nginx_*' salt pillars accordingly
+USE_LETSENCRYPT="yes"
+
+# The directory to check for the config files (pillars, states) you want to use.
+# There are a few examples under 'config_examples'.
+# CONFIG_DIR="local_config_dir"
+# Extra states to apply. If you use your own subdir, change this value accordingly
+# EXTRA_STATES_DIR="${CONFIG_DIR}/states"
+
+# Which release of Arvados repo you want to use
+RELEASE="production"
+# Which version of Arvados you want to install. Defaults to latest stable
+# VERSION="2.1.2-1"
+
+# This is an arvados-formula setting.
+# If branch is set, the script will switch to it before running salt
+# Usually not needed, only used for testing
+# BRANCH="master"
+
+##########################################################
+# Usually there's no need to modify things below this line
+
+# Formulas versions
+# ARVADOS_TAG="v1.1.4"
+# POSTGRES_TAG="v0.41.6"
+# NGINX_TAG="temp-fix-missing-statements-in-pillar"
+# DOCKER_TAG="v1.0.0"
+# LOCALE_TAG="v0.3.4"
+# LETSENCRYPT_TAG="v2.1.0"
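One way to generate the random strings this file asks for (a sketch; any generator that yields 32 or more alphanumeric characters will do):

    for var in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN \
               ANONYMOUS_USER_TOKEN WORKBENCH_SECRET_KEY DATABASE_PASSWORD; do
      echo "${var}=$(tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32)"
    done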
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
new file mode 100644 (file)
index 0000000..6dd4772
--- /dev/null
@@ -0,0 +1,76 @@
+##########################################################
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# These are the basic parameters to configure the installation
+
+# The FIVE-ALPHANUMERIC-CHARACTER name you want to give your cluster
+CLUSTER="cluster_fixme_or_this_wont_work"
+
+# The domain name you want to give to your cluster's hosts
+DOMAIN="domain_fixme_or_this_wont_work"
+
+# The host SSL port your browser will use to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
+CONTROLLER_EXT_SSL_PORT=443
+KEEP_EXT_SSL_PORT=25101
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=9002
+WEBSHELL_EXT_SSL_PORT=4202
+WEBSOCKET_EXT_SSL_PORT=8002
+WORKBENCH1_EXT_SSL_PORT=443
+WORKBENCH2_EXT_SSL_PORT=3001
+
+INITIAL_USER="admin"
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
+INITIAL_USER_PASSWORD="password"
+
+# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
+BLOB_SIGNING_KEY=blobsigningkeymusthaveatleast32characters
+MANAGEMENT_TOKEN=managementtokenmusthaveatleast32characters
+SYSTEM_ROOT_TOKEN=systemroottokenmusthaveatleast32characters
+ANONYMOUS_USER_TOKEN=anonymoususertokenmusthaveatleast32characters
+WORKBENCH_SECRET_KEY=workbenchsecretkeymusthaveatleast32characters
+DATABASE_PASSWORD=please_set_this_to_some_secure_value
+
+# SSL CERTIFICATES
+# Arvados REQUIRES valid SSL to work correctly. Otherwise, some components will fail
+# to communicate and can silently drop traffic. You can use the Letsencrypt
+# salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try
+# to automatically obtain and install SSL certificates for your instances, or set
+# this variable to "no", provide and upload your own certificates to the instances,
+# and modify the 'nginx_*' salt pillars accordingly
+USE_LETSENCRYPT="no"
+
+# The directory to check for the config files (pillars, states) you want to use.
+# There are a few examples under 'config_examples'.
+# CONFIG_DIR="local_config_dir"
+# Extra states to apply. If you use your own subdir, change this value accordingly
+# EXTRA_STATES_DIR="${CONFIG_DIR}/states"
+
+# Which release of Arvados repo you want to use
+RELEASE="production"
+# Which version of Arvados you want to install. Defaults to latest stable
+# VERSION="2.1.2-1"
+
+# This is an arvados-formula setting.
+# If branch is set, the script will switch to it before running salt
+# Usually not needed, only used for testing
+# BRANCH="master"
+
+##########################################################
+# Usually there's no need to modify things below this line
+
+# Formulas versions
+# ARVADOS_TAG="v1.1.4"
+# POSTGRES_TAG="v0.41.6"
+# NGINX_TAG="temp-fix-missing-statements-in-pillar"
+# DOCKER_TAG="v1.0.0"
+# LOCALE_TAG="v0.3.4"
+# LETSENCRYPT_TAG="v2.1.0"
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
new file mode 100644 (file)
index 0000000..fda42a9
--- /dev/null
@@ -0,0 +1,85 @@
+##########################################################
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# These are the basic parameters to configure the installation
+
+# The FIVE-ALPHANUMERIC-CHARACTER name you want to give your cluster
+CLUSTER="cluster_fixme_or_this_wont_work"
+
+# The domain name you want to give to your cluster's hosts
+DOMAIN="domain_fixme_or_this_wont_work"
+
+# Set this value when installing a cluster on a single host with a single hostname
+# to access all the instances. Not used in the other examples.
+# When using virtualization (e.g. AWS), this should be
+# the EXTERNAL/PUBLIC hostname for the instance.
+# If empty, ${CLUSTER}.${DOMAIN} will be used
+HOSTNAME_EXT=""
+# The internal hostname for the host. In the example files, only used in the
+# single_host/single_hostname example
+HOSTNAME_INT="127.0.1.1"
+# The host SSL port your browser will use to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)
+CONTROLLER_EXT_SSL_PORT=9443
+KEEP_EXT_SSL_PORT=35101
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=11002
+WEBSHELL_EXT_SSL_PORT=14202
+WEBSOCKET_EXT_SSL_PORT=18002
+WORKBENCH1_EXT_SSL_PORT=9444
+WORKBENCH2_EXT_SSL_PORT=9445
+
+INITIAL_USER="admin"
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
+INITIAL_USER_PASSWORD="password"
+
+# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
+BLOB_SIGNING_KEY=blobsigningkeymusthaveatleast32characters
+MANAGEMENT_TOKEN=managementtokenmusthaveatleast32characters
+SYSTEM_ROOT_TOKEN=systemroottokenmusthaveatleast32characters
+ANONYMOUS_USER_TOKEN=anonymoususertokenmusthaveatleast32characters
+WORKBENCH_SECRET_KEY=workbenchsecretkeymusthaveatleast32characters
+DATABASE_PASSWORD=please_set_this_to_some_secure_value
+
+# SSL CERTIFICATES
+# Arvados REQUIRES valid SSL to work correctly. Otherwise, some components will fail
+# to communicate and can silently drop traffic. You can use the Letsencrypt
+# salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try
+# to automatically obtain and install SSL certificates for your instances, or set
+# this variable to "no", provide and upload your own certificates to the instances,
+# and modify the 'nginx_*' salt pillars accordingly
+USE_LETSENCRYPT="no"
+
+# The directory to check for the config files (pillars, states) you want to use.
+# There are a few examples under 'config_examples'.
+# CONFIG_DIR="local_config_dir"
+# Extra states to apply. If you use your own subdir, change this value accordingly
+# EXTRA_STATES_DIR="${CONFIG_DIR}/states"
+
+# Which release of Arvados repo you want to use
+RELEASE="production"
+# Which version of Arvados you want to install. Defaults to latest stable
+# VERSION="2.1.2-1"
+
+# This is an arvados-formula setting.
+# If branch is set, the script will switch to it before running salt
+# Usually not needed, only used for testing
+# BRANCH="master"
+
+##########################################################
+# Usually there's no need to modify things below this line
+
+# Formulas versions
+# ARVADOS_TAG="v1.1.4"
+# POSTGRES_TAG="v0.41.6"
+# NGINX_TAG="temp-fix-missing-statements-in-pillar"
+# DOCKER_TAG="v1.0.0"
+# LOCALE_TAG="v0.3.4"
+# LETSENCRYPT_TAG="v2.1.0"
index 31266c1b8f11ab5c02ccca6989970b3b3efa6975..02da9933bdeab991415f4956257f5a9c22a5abf4 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
 
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 #
 # vagrant up
 
-##########################################################
-# This section are the basic parameters to configure the installation
-
-# The 5 letters name you want to give your cluster
-CLUSTER="arva2"
-DOMAIN="arv.local"
-
-INITIAL_USER="admin"
-
-# If not specified, the initial user email will be composed as
-# INITIAL_USER@CLUSTER.DOMAIN
-INITIAL_USER_EMAIL="${INITIAL_USER}@${CLUSTER}.${DOMAIN}"
-INITIAL_USER_PASSWORD="password"
-
-# The example config you want to use. Currently, only "single_host" is
-# available
-CONFIG_DIR="single_host"
-
-# Which release of Arvados repo you want to use
-RELEASE="production"
-# Which version of Arvados you want to install. Defaults to 'latest'
-# in the desired repo
-VERSION="latest"
-
-# Host SSL port where you want to point your browser to access Arvados
-# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
-# You can point it to another port if desired
-# In Vagrant, make sure it matches what you set in the Vagrantfile
-# HOST_SSL_PORT=443
-
-# This is a arvados-formula setting.
-# If branch is set, the script will switch to it before running salt
-# Usually not needed, only used for testing
-# BRANCH="master"
-
-##########################################################
-# Usually there's no need to modify things below this line
-
-# Formulas versions
-ARVADOS_TAG="v1.1.4"
-POSTGRES_TAG="v0.41.3"
-NGINX_TAG="v2.4.0"
-DOCKER_TAG="v1.0.0"
-LOCALE_TAG="v0.3.4"
-
 set -o pipefail
 
 # capture the directory that the script is running from
@@ -65,18 +20,32 @@ usage() {
   echo >&2 "Usage: ${0} [-h] [-h]"
   echo >&2
   echo >&2 "${0} options:"
-  echo >&2 "  -d, --debug             Run salt installation in debug mode"
-  echo >&2 "  -p <N>, --ssl-port <N>  SSL port to use for the web applications"
-  echo >&2 "  -t, --test              Test installation running a CWL workflow"
-  echo >&2 "  -h, --help              Display this help and exit"
-  echo >&2 "  -v, --vagrant           Run in vagrant and use the /vagrant shared dir"
+  echo >&2 "  -d, --debug                                 Run salt installation in debug mode"
+  echo >&2 "  -p <N>, --ssl-port <N>                      SSL port to use for the web applications"
+  echo >&2 "  -c <local.params>, --config <local.params>  Path to the local.params config file"
+  echo >&2 "  -t, --test                                  Test installation running a CWL workflow"
+  echo >&2 "  -r, --roles                                 List of Arvados roles to apply to the host, comma separated"
+  echo >&2 "                                              Possible values are:"
+  echo >&2 "                                                api"
+  echo >&2 "                                                controller"
+  echo >&2 "                                                keepstore"
+  echo >&2 "                                                websocket"
+  echo >&2 "                                                keepweb"
+  echo >&2 "                                                workbench2"
+  echo >&2 "                                                keepproxy"
+  echo >&2 "                                                shell"
+  echo >&2 "                                                workbench"
+  echo >&2 "                                                dispatcher"
+  echo >&2 "                                              Defaults to applying them all"
+  echo >&2 "  -h, --help                                  Display this help and exit"
+  echo >&2 "  -v, --vagrant                               Run in vagrant and use the /vagrant shared dir"
   echo >&2
 }
 
 arguments() {
   # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-  TEMP=$(getopt -o dhp:tv \
-    --long debug,help,ssl-port:,test,vagrant \
+  TEMP=$(getopt -o c:dhp:r:tv \
+    --long config:,debug,help,ssl-port:,roles:,test,vagrant \
     -n "${0}" -- "${@}")
 
   if [ ${?} != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
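With the new -c and -r flags, a node in a multi-host deploy can be provisioned with just its roles, along these lines (an illustrative invocation, not the only valid one):

    ./provision.sh --config local.params --roles database,api,controller,websocket,dispatcher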
@@ -85,10 +54,31 @@ arguments() {
 
   while [ ${#} -ge 1 ]; do
     case ${1} in
+      -c | --config)
+        CONFIG_FILE=${2}
+        shift 2
+        ;;
       -d | --debug)
         LOG_LEVEL="debug"
         shift
         ;;
+      -p | --ssl-port)
+        CONTROLLER_EXT_SSL_PORT=${2}
+        shift 2
+        ;;
+      -r | --roles)
+        for i in ${2//,/ }; do
+          # Verify the role exists
+          if [[ ! ",database,api,controller,keepstore,websocket,keepweb,workbench2,keepproxy,shell,workbench,dispatcher," == *",${i},"* ]]; then
+            echo "The role '${i}' is not a valid role"
+            usage
+            exit 1
+          fi
+          ROLES="${ROLES} ${i}"
+        done
+        shift 2
+        ;;
       -t | --test)
         TEST="yes"
         shift
@@ -97,10 +87,6 @@ arguments() {
         VAGRANT="yes"
         shift
         ;;
-      -p | --ssl-port)
-        HOST_SSL_PORT=${2}
-        shift 2
-        ;;
       --)
         shift
         break
@@ -113,11 +99,43 @@ arguments() {
   done
 }
 
+CONFIG_FILE="${SCRIPT_DIR}/local.params"
+CONFIG_DIR="local_config_dir"
 LOG_LEVEL="info"
-HOST_SSL_PORT=443
+CONTROLLER_EXT_SSL_PORT=443
 TESTS_DIR="tests"
 
-arguments ${@}
+CLUSTER=""
+DOMAIN=""
+
+# Hostnames/IPs used for single-host deploys
+HOSTNAME_EXT=""
+HOSTNAME_INT="127.0.1.1"
+
+# Initial user setup
+INITIAL_USER=""
+INITIAL_USER_EMAIL=""
+INITIAL_USER_PASSWORD=""
+
+CONTROLLER_EXT_SSL_PORT=8000
+KEEP_EXT_SSL_PORT=25101
+# Both for collections and downloads
+KEEPWEB_EXT_SSL_PORT=9002
+WEBSHELL_EXT_SSL_PORT=4202
+WEBSOCKET_EXT_SSL_PORT=8002
+WORKBENCH1_EXT_SSL_PORT=443
+WORKBENCH2_EXT_SSL_PORT=3001
+
+RELEASE="production"
+VERSION="2.1.2-1"
+
+# Formulas versions
+ARVADOS_TAG="master"
+POSTGRES_TAG="v0.41.6"
+NGINX_TAG="temp-fix-missing-statements-in-pillar"
+DOCKER_TAG="v1.0.0"
+LOCALE_TAG="v0.3.4"
+LETSENCRYPT_TAG="v2.1.0"
 
 # Salt's dir
 ## states
@@ -127,15 +145,50 @@ F_DIR="/srv/formulas"
 ##pillars
 P_DIR="/srv/pillars"
 
+arguments ${@}
+
+if [ -s ${CONFIG_FILE} ]; then
+  source ${CONFIG_FILE}
+else
+  echo >&2 "Please create a '${CONFIG_FILE}' file with initial values, as described in"
+  echo >&2 "  * https://doc.arvados.org/install/salt-single-host.html#single_host, or"
+  echo >&2 "  * https://doc.arvados.org/install/salt-multi-host.html#multi_host_multi_hostnames"
+  exit 1
+fi
+
+if [ ! -d ${CONFIG_DIR} ]; then
+  echo >&2 "Please create a '${CONFIG_DIR}' with initial values, as described in"
+  echo >&2 "  * https://doc.arvados.org/install/salt-single-host.html#single_host, or"
+  echo >&2 "  * https://doc.arvados.org/install/salt-multi-host.html#multi_host_multi_hostnames"
+  exit 1
+fi
+
+if grep -q 'fixme_or_this_wont_work' ${CONFIG_FILE} ; then
+  echo >&2 "The config file ${CONFIG_FILE} has some parameters that need to be modified."
+  echo >&2 "Please, fix them and re-run the provision script."
+  exit 1
+fi
+
+if ! grep -qE '^[[:alnum:]]{5}$' <<<${CLUSTER} ; then
+  echo >&2 "ERROR: <CLUSTER> must be exactly 5 alphanumeric characters long"
+  echo >&2 "Fix the cluster name in the 'local.params' file and re-run the provision script"
+  exit 1
+fi
+
+# Only used in single_host/single_hostname deploys
+if [ "x${HOSTNAME_EXT}" = "x" ] ; then
+  HOSTNAME_EXT="${CLUSTER}.${DOMAIN}"
+fi
+
 apt-get update
 apt-get install -y curl git jq
 
-dpkg -l |grep salt-minion
-if [ ${?} -eq 0 ]; then
+if which salt-call; then
   echo "Salt already installed"
 else
   curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
-  sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+  sh /tmp/bootstrap_salt.sh -XdfP -x python3
+  /bin/systemctl stop salt-minion.service
   /bin/systemctl disable salt-minion.service
 fi
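Because the minion service is stopped and disabled, everything below drives salt in masterless mode; a quick check that the bootstrap worked:

    salt-call --local test.ping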
 
@@ -146,98 +199,273 @@ file_roots:
   base:
     - ${S_DIR}
     - ${F_DIR}/*
-    - ${F_DIR}/*/test/salt/states/examples
 
 pillar_roots:
   base:
     - ${P_DIR}
 EOFSM
 
-mkdir -p ${S_DIR}
-mkdir -p ${F_DIR}
-mkdir -p ${P_DIR}
-
-# States
-cat > ${S_DIR}/top.sls << EOFTSLS
-base:
-  '*':
-    - single_host.host_entries
-    - single_host.snakeoil_certs
-    - locale
-    - nginx.passenger
-    - postgres
-    - docker
-    - arvados
-EOFTSLS
-
-# Pillars
-cat > ${P_DIR}/top.sls << EOFPSLS
-base:
-  '*':
-    - arvados
-    - docker
-    - locale
-    - nginx_api_configuration
-    - nginx_controller_configuration
-    - nginx_keepproxy_configuration
-    - nginx_keepweb_configuration
-    - nginx_passenger
-    - nginx_websocket_configuration
-    - nginx_webshell_configuration
-    - nginx_workbench2_configuration
-    - nginx_workbench_configuration
-    - postgresql
-EOFPSLS
+mkdir -p ${S_DIR} ${F_DIR} ${P_DIR}
 
 # Get the formula and dependencies
 cd ${F_DIR} || exit 1
-git clone --branch "${ARVADOS_TAG}" https://github.com/saltstack-formulas/arvados-formula.git
-git clone --branch "${DOCKER_TAG}" https://github.com/saltstack-formulas/docker-formula.git
-git clone --branch "${LOCALE_TAG}" https://github.com/saltstack-formulas/locale-formula.git
-git clone --branch "${NGINX_TAG}" https://github.com/saltstack-formulas/nginx-formula.git
-git clone --branch "${POSTGRES_TAG}" https://github.com/saltstack-formulas/postgres-formula.git
-
+git clone --branch "${ARVADOS_TAG}"     https://github.com/arvados/arvados-formula.git
+git clone --branch "${DOCKER_TAG}"      https://github.com/saltstack-formulas/docker-formula.git
+git clone --branch "${LOCALE_TAG}"      https://github.com/saltstack-formulas/locale-formula.git
+# git clone --branch "${NGINX_TAG}"       https://github.com/saltstack-formulas/nginx-formula.git
+git clone --branch "${NGINX_TAG}"       https://github.com/netmanagers/nginx-formula.git
+git clone --branch "${POSTGRES_TAG}"    https://github.com/saltstack-formulas/postgres-formula.git
+git clone --branch "${LETSENCRYPT_TAG}" https://github.com/saltstack-formulas/letsencrypt-formula.git
+
+# If we want to try a specific branch of the formula
 if [ "x${BRANCH}" != "x" ]; then
   cd ${F_DIR}/arvados-formula || exit 1
-  git checkout -t origin/"${BRANCH}"
+  git checkout -t origin/"${BRANCH}" -b "${BRANCH}"
   cd -
 fi
 
 if [ "x${VAGRANT}" = "xyes" ]; then
-  SOURCE_PILLARS_DIR="/vagrant/${CONFIG_DIR}"
-  TESTS_DIR="/vagrant/${TESTS_DIR}"
+  EXTRA_STATES_DIR="/home/vagrant/${CONFIG_DIR}/states"
+  SOURCE_PILLARS_DIR="/home/vagrant/${CONFIG_DIR}/pillars"
+  SOURCE_TESTS_DIR="/home/vagrant/${TESTS_DIR}"
 else
-  SOURCE_PILLARS_DIR="${SCRIPT_DIR}/${CONFIG_DIR}"
-  TESTS_DIR="${SCRIPT_DIR}/${TESTS_DIR}"
+  EXTRA_STATES_DIR="${SCRIPT_DIR}/${CONFIG_DIR}/states"
+  SOURCE_PILLARS_DIR="${SCRIPT_DIR}/${CONFIG_DIR}/pillars"
+  SOURCE_TESTS_DIR="${SCRIPT_DIR}/${TESTS_DIR}"
 fi
 
-# Replace cluster and domain name in the example pillars and test files
+SOURCE_STATES_DIR="${EXTRA_STATES_DIR}"
+
+# Replace variables (cluster, domain, etc.) in the pillars, states and tests
+# to ease deployment for newcomers
+if [ ! -d "${SOURCE_PILLARS_DIR}" ]; then
+  echo "${SOURCE_PILLARS_DIR} does not exist or is not a directory. Exiting."
+  exit 1
+fi
 for f in "${SOURCE_PILLARS_DIR}"/*; do
-  sed "s/__CLUSTER__/${CLUSTER}/g;
-       s/__DOMAIN__/${DOMAIN}/g;
-       s/__RELEASE__/${RELEASE}/g;
-       s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
-       s/__GUEST_SSL_PORT__/${GUEST_SSL_PORT}/g;
-       s/__INITIAL_USER__/${INITIAL_USER}/g;
-       s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
-       s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g;
-       s/__VERSION__/${VERSION}/g" \
+  sed "s#__ANONYMOUS_USER_TOKEN__#${ANONYMOUS_USER_TOKEN}#g;
+       s#__BLOB_SIGNING_KEY__#${BLOB_SIGNING_KEY}#g;
+       s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
+       s#__CLUSTER__#${CLUSTER}#g;
+       s#__DOMAIN__#${DOMAIN}#g;
+       s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
+       s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+       s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
+       s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
+       s#__INITIAL_USER__#${INITIAL_USER}#g;
+       s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
+       s#__KEEPWEB_EXT_SSL_PORT__#${KEEPWEB_EXT_SSL_PORT}#g;
+       s#__KEEP_EXT_SSL_PORT__#${KEEP_EXT_SSL_PORT}#g;
+       s#__MANAGEMENT_TOKEN__#${MANAGEMENT_TOKEN}#g;
+       s#__RELEASE__#${RELEASE}#g;
+       s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g;
+       s#__VERSION__#${VERSION}#g;
+       s#__WEBSHELL_EXT_SSL_PORT__#${WEBSHELL_EXT_SSL_PORT}#g;
+       s#__WEBSOCKET_EXT_SSL_PORT__#${WEBSOCKET_EXT_SSL_PORT}#g;
+       s#__WORKBENCH1_EXT_SSL_PORT__#${WORKBENCH1_EXT_SSL_PORT}#g;
+       s#__WORKBENCH2_EXT_SSL_PORT__#${WORKBENCH2_EXT_SSL_PORT}#g;
+       s#__CLUSTER_INT_CIDR__#${CLUSTER_INT_CIDR}#g;
+       s#__CONTROLLER_INT_IP__#${CONTROLLER_INT_IP}#g;
+       s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
+       s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
+       s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
+       s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
+       s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
+       s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
+       s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
+       s#__WORKBENCH2_INT_IP__#${WORKBENCH2_INT_IP}#g;
+       s#__DATABASE_INT_IP__#${DATABASE_INT_IP}#g;
+       s#__WORKBENCH_SECRET_KEY__#${WORKBENCH_SECRET_KEY}#g" \
   "${f}" > "${P_DIR}"/$(basename "${f}")
 done
 
+if [ "x${TEST}" = "xyes" ] && [ ! -d "${SOURCE_TESTS_DIR}" ]; then
+  echo "You requested to run tests, but ${SOURCE_TESTS_DIR} does not exist or is not a directory. Exiting."
+  exit 1
+fi
 mkdir -p /tmp/cluster_tests
-# Replace cluster and domain name in the example pillars and test files
-for f in "${TESTS_DIR}"/*; do
-  sed "s/__CLUSTER__/${CLUSTER}/g;
-       s/__DOMAIN__/${DOMAIN}/g;
-       s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
-       s/__INITIAL_USER__/${INITIAL_USER}/g;
-       s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
-       s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g" \
-  ${f} > /tmp/cluster_tests/$(basename ${f})
+# Replace the variables (cluster, domain, tokens, etc.) in the test files
+for f in "${SOURCE_TESTS_DIR}"/*; do
+  sed "s#__CLUSTER__#${CLUSTER}#g;
+       s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
+       s#__DOMAIN__#${DOMAIN}#g;
+       s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+       s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
+       s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g
+       s#__INITIAL_USER__#${INITIAL_USER}#g;
+       s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
+       s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g" \
+  "${f}" > "/tmp/cluster_tests"/$(basename "${f}")
 done
 chmod 755 /tmp/cluster_tests/run-test.sh
 
+# Replace helper state files that differ from the formula's examples
+if [ -d "${SOURCE_STATES_DIR}" ]; then
+  mkdir -p "${F_DIR}"/extra/extra
+
+  for f in "${SOURCE_STATES_DIR}"/*; do
+    sed "s#__ANONYMOUS_USER_TOKEN__#${ANONYMOUS_USER_TOKEN}#g;
+         s#__CLUSTER__#${CLUSTER}#g;
+         s#__BLOB_SIGNING_KEY__#${BLOB_SIGNING_KEY}#g;
+         s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
+         s#__DOMAIN__#${DOMAIN}#g;
+         s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
+         s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+         s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
+         s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
+         s#__INITIAL_USER__#${INITIAL_USER}#g;
+         s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
+         s#__KEEPWEB_EXT_SSL_PORT__#${KEEPWEB_EXT_SSL_PORT}#g;
+         s#__KEEP_EXT_SSL_PORT__#${KEEP_EXT_SSL_PORT}#g;
+         s#__MANAGEMENT_TOKEN__#${MANAGEMENT_TOKEN}#g;
+         s#__RELEASE__#${RELEASE}#g;
+         s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g;
+         s#__VERSION__#${VERSION}#g;
+         s#__CLUSTER_INT_CIDR__#${CLUSTER_INT_CIDR}#g;
+         s#__CONTROLLER_INT_IP__#${CONTROLLER_INT_IP}#g;
+         s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
+         s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
+         s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
+         s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
+         s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
+         s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
+         s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
+         s#__WORKBENCH2_INT_IP__#${WORKBENCH2_INT_IP}#g;
+         s#__DATABASE_INT_IP__#${DATABASE_INT_IP}#g;
+         s#__WEBSHELL_EXT_SSL_PORT__#${WEBSHELL_EXT_SSL_PORT}#g;
+         s#__WEBSOCKET_EXT_SSL_PORT__#${WEBSOCKET_EXT_SSL_PORT}#g;
+         s#__WORKBENCH1_EXT_SSL_PORT__#${WORKBENCH1_EXT_SSL_PORT}#g;
+         s#__WORKBENCH2_EXT_SSL_PORT__#${WORKBENCH2_EXT_SSL_PORT}#g;
+         s#__WORKBENCH_SECRET_KEY__#${WORKBENCH_SECRET_KEY}#g" \
+    "${f}" > "${F_DIR}/extra/extra"/$(basename "${f}")
+  done
+fi
+
+# Now we build the Salt states/pillars trees.
+# Since states and pillars have to be kept separate when specific roles
+# are requested, we iterate over both at the same time
+
+# States
+cat > ${S_DIR}/top.sls << EOFTSLS
+base:
+  '*':
+    - locale
+EOFTSLS
+
+# Pillars
+cat > ${P_DIR}/top.sls << EOFPSLS
+base:
+  '*':
+    - locale
+    - arvados
+EOFPSLS
+
+# States, extra states
+if [ -d "${F_DIR}"/extra/extra ]; then
+  for f in "${F_DIR}"/extra/extra/*.sls; do
+  echo "    - extra.$(basename ${f} | sed 's/.sls$//g')" >> ${S_DIR}/top.sls
+  done
+fi
+
+# If we want specific roles for a node, just add the desired states
+# and their dependencies
+if [ -z "${ROLES}" ]; then
+  # States
+  echo "    - nginx.passenger" >> ${S_DIR}/top.sls
+  if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
+    grep -q "letsencrypt" ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+  fi
+  echo "    - postgres" >> ${S_DIR}/top.sls
+  echo "    - docker" >> ${S_DIR}/top.sls
+  echo "    - arvados" >> ${S_DIR}/top.sls
+
+  # Pillars
+  echo "    - docker" >> ${P_DIR}/top.sls
+  echo "    - nginx_api_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_controller_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_keepproxy_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_keepweb_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_passenger" >> ${P_DIR}/top.sls
+  echo "    - nginx_websocket_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_webshell_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_workbench2_configuration" >> ${P_DIR}/top.sls
+  echo "    - nginx_workbench_configuration" >> ${P_DIR}/top.sls
+  echo "    - postgresql" >> ${P_DIR}/top.sls
+  if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
+    grep -q "letsencrypt" ${P_DIR}/top.sls || echo "    - letsencrypt" >> ${P_DIR}/top.sls
+  fi
+else
+  # If we add individual roles, make sure we add the repo first
+  echo "    - arvados.repo" >> ${S_DIR}/top.sls
+  for R in ${ROLES}; do
+    case "${R}" in
+      "database")
+        # States
+        echo "    - postgres" >> ${S_DIR}/top.sls
+        # Pillars
+        echo '    - postgresql' >> ${P_DIR}/top.sls
+      ;;
+      "api")
+        # States
+        # FIXME: https://dev.arvados.org/issues/17352
+        grep -q "postgres.client" ${S_DIR}/top.sls || echo "    - postgres.client" >> ${S_DIR}/top.sls
+        grep -q "nginx.passenger" ${S_DIR}/top.sls || echo "    - nginx.passenger" >> ${S_DIR}/top.sls
+        ### If we don't install and run LE before arvados-api-server, it fails and breaks
+        ### everything after it, so we add it here: after all, we are sharing the host for
+        ### api and controller
+        if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
+          grep -q "letsencrypt" ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+        fi
+        grep -q "arvados.${R}" ${S_DIR}/top.sls    || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+        # Pillars
+        grep -q "docker" ${P_DIR}/top.sls                   || echo "    - docker" >> ${P_DIR}/top.sls
+        grep -q "postgresql" ${P_DIR}/top.sls               || echo "    - postgresql" >> ${P_DIR}/top.sls
+        grep -q "nginx_passenger" ${P_DIR}/top.sls          || echo "    - nginx_passenger" >> ${P_DIR}/top.sls
+        grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo "    - nginx_${R}_configuration" >> ${P_DIR}/top.sls
+      ;;
+      "controller" | "websocket" | "workbench" | "workbench2" | "keepweb" | "keepproxy")
+        # States
+        grep -q "nginx.passenger" ${S_DIR}/top.sls || echo "    - nginx.passenger" >> ${S_DIR}/top.sls
+        if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
+          grep -q "letsencrypt" ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+        fi
+        grep -q "arvados.${R}" ${S_DIR}/top.sls    || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+        # Pillars
+        grep -q "nginx_passenger" ${P_DIR}/top.sls          || echo "    - nginx_passenger" >> ${P_DIR}/top.sls
+        grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo "    - nginx_${R}_configuration" >> ${P_DIR}/top.sls
+        if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
+          grep -q "letsencrypt" ${P_DIR}/top.sls || echo "    - letsencrypt" >> ${P_DIR}/top.sls
+          grep -q "letsencrypt_${R}_configuration" ${P_DIR}/top.sls || echo "    - letsencrypt_${R}_configuration" >> ${P_DIR}/top.sls
+        fi
+      ;;
+      "shell")
+        # States
+        grep -q "docker" ${S_DIR}/top.sls       || echo "    - docker" >> ${S_DIR}/top.sls
+        grep -q "arvados.${R}" ${S_DIR}/top.sls || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+        # Pillars
+        grep -q "" ${P_DIR}/top.sls                             || echo "    - docker" >> ${P_DIR}/top.sls
+        grep -q "nginx_webshell_configuration" ${P_DIR}/top.sls || echo "    - nginx_webshell_configuration" >> ${P_DIR}/top.sls
+      ;;
+      "dispatcher")
+        # States
+        grep -q "docker" ${S_DIR}/top.sls       || echo "    - docker" >> ${S_DIR}/top.sls
+        grep -q "arvados.${R}" ${S_DIR}/top.sls || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+        # Pillars
+        # ATM, no specific pillar needed
+      ;;
+      "keepstore")
+        # States
+        grep -q "arvados.${R}" ${S_DIR}/top.sls || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+        # Pillars
+        # ATM, no specific pillar needed
+      ;;
+      *)
+        echo "Unknown role ${R}"
+        exit 1
+      ;;
+    esac
+  done
+fi
+
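As a sketch of the outcome, a node provisioned with `--roles database,api` and USE_LETSENCRYPT=yes ends up with a states top.sls roughly like this (plus one `extra.*` entry per helper state copied above):

    cat /srv/salt/top.sls
    # base:
    #   '*':
    #     - locale
    #     - arvados.repo
    #     - postgres
    #     - postgres.client
    #     - nginx.passenger
    #     - letsencrypt
    #     - arvados.api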
 # FIXME! #16992 Temporary fix for psql call in arvados-api-server
 if [ -e /root/.psqlrc ]; then
   if ! ( grep 'pset pager off' /root/.psqlrc ); then
@@ -270,12 +498,12 @@ fi
 echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
 # If running in a vagrant VM, also add default user to docker group
 if [ "x${VAGRANT}" = "xyes" ]; then
-  cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant
+  cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
 
   echo "Adding the vagrant user to the docker group"
   usermod -a -G docker vagrant
 else
-  cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}
+  cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
 fi
 
 # Test that the installation finished correctly
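To trust the copied CA from another machine, the same mechanism the snakeoil state uses server-side applies (a Debian-flavored sketch, assuming cluster arva2 and domain arv.local):

    sudo cp arva2.arv.local-arvados-snakeoil-ca.pem /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt
    sudo update-ca-certificates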
index 8d9de6fdf0b12e338208fa8ba2fcd89b5b995139..53c51a2c5a097d2e8b45446ea26a7e2a26800f2d 100755 (executable)
@@ -3,14 +3,14 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-export ARVADOS_API_TOKEN=changemesystemroottoken
-export ARVADOS_API_HOST=__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+export ARVADOS_API_TOKEN=__SYSTEM_ROOT_TOKEN__
+export ARVADOS_API_HOST=__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__
 export ARVADOS_API_HOST_INSECURE=true
 
 set -o pipefail
 
 # First, validate that the CA is installed and that we can query it with no errors.
-if ! curl -s -o /dev/null https://workbench.${ARVADOS_API_HOST}/users/welcome?return_to=%2F; then
+if ! curl -s -o /dev/null https://${ARVADOS_API_HOST}/users/welcome?return_to=%2F; then
   echo "The Arvados CA was not correctly installed. Although some components will work,"
   echo "others won't. Please verify that the CA cert file was installed correctly and"
   echo "retry running these tests."
diff --git a/tools/terraform/.gitignore b/tools/terraform/.gitignore
new file mode 100644 (file)
index 0000000..df47a74
--- /dev/null
@@ -0,0 +1,7 @@
+.DS_Store
+.terraform
+examples
+*backup
+*disabled
+.terraform.lock.hcl
+terraform.tfstate*