18791: make the test hasher workflow work by fixing dns resolution
author Ward Vandewege <ward@curii.com>
Fri, 25 Feb 2022 02:08:47 +0000 (21:08 -0500)
committer Ward Vandewege <ward@curii.com>
Wed, 2 Mar 2022 01:10:25 +0000 (20:10 -0500)
       inside docker. Cleanup for the local.params file.

Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

15 files changed:
doc/install/salt-single-host.html.textile.liquid
tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/docker.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_api_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_controller_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepproxy_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepweb_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_webshell_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_websocket_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench_configuration.sls
tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls
tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls
tools/salt-install/local.params.example.single_host_single_hostname
tools/salt-install/provision.sh
tools/salt-install/tests/run-test.sh

index 039a4b1481d8eae915fd38a332563b7cab96d1b8..6d08672e4656e0f1ca7d772af3de6c703da59df6 100644 (file)
@@ -26,13 +26,13 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#limitations). Limitations of the single host install
 
-<b>NOTE: The single host installation is a good choice when evaluating Arvados, but it is not recommended for production use.</b>
+<b>NOTE: The single host installation is a good choice for evaluating Arvados, but it is not recommended for production use.</b>
 
 Using the default configuration, this installation method has a number of limitations:
 
-* all services run on the same machine, and they will compete for resources.
-* it uses the local machine disk for Keep storage (under the @/tmp@ directory).
-* it installs the @crunch-dispatch-local@ dispatcher, which can run just one concurrent CWL job. This job will be executed on the machine that runs all the Arvados services. Most workflows require at least two concurrent CWL jobs, one for the workflow runner, and one for the payload.
+* all services run on the same machine, and they will compete for resources. This includes any compute jobs.
+* it uses the local machine disk for Keep storage (under the @/tmp@ directory). There may not be a lot of space available.
+* it installs the @crunch-dispatch-local@ dispatcher, which can run just eight concurrent CWL jobs. These jobs will be executed on the same machine that runs all the Arvados services and may well starve them of resources.
 
 It is possible to start with the single host installation method and modify the Arvados configuration file later to address these limitations. E.g. switch to a "different storage volume setup":{{site.baseurl}}/install/configure-s3-object-storage.html for Keep, and switch to "the cloud dispatcher":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html to provision compute resources dynamically.
 
index 78a5a938f337d437b5a8a1606ef571945f81dccf..eb59062857384e02c01d99f7d2790618dca5c548 100644 (file)
@@ -102,7 +102,7 @@ arvados:
       # <cluster>-nyw5e-<volume>
       __CLUSTER__-nyw5e-000000000000000:
         AccessViaHosts:
-          'http://__HOSTNAME_INT__:25107':
+          'http://__IP_INT__:25107':
             ReadOnly: false
         Replication: 2
         Driver: Directory
@@ -119,21 +119,21 @@ arvados:
       Controller:
         ExternalURL: 'https://__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__'
         InternalURLs:
-          'http://__HOSTNAME_INT__:8003': {}
+          'http://__IP_INT__:8003': {}
       Keepproxy:
         ExternalURL: 'https://__HOSTNAME_EXT__:__KEEP_EXT_SSL_PORT__'
         InternalURLs:
-          'http://__HOSTNAME_INT__:25100': {}
+          'http://__IP_INT__:25100': {}
       Keepstore:
         InternalURLs:
-          'http://__HOSTNAME_INT__:25107': {}
+          'http://__IP_INT__:25107': {}
       RailsAPI:
         InternalURLs:
-          'http://__HOSTNAME_INT__:8004': {}
+          'http://__IP_INT__:8004': {}
       WebDAV:
         ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
         InternalURLs:
-          'http://__HOSTNAME_INT__:9003': {}
+          'http://__IP_INT__:9003': {}
       WebDAVDownload:
         ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'
       WebShell:
@@ -141,7 +141,7 @@ arvados:
       Websocket:
         ExternalURL: 'wss://__HOSTNAME_EXT__:__WEBSOCKET_EXT_SSL_PORT__/websocket'
         InternalURLs:
-          'http://__HOSTNAME_INT__:8005': {}
+          'http://__IP_INT__:8005': {}
       Workbench1:
         ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH1_EXT_SSL_PORT__'
       Workbench2:
index 54d22561594ac4ebc2019edb9b54fe156f7440ec..30d90153e8d49899013bf41aa20cb6670b47aa88 100644 (file)
@@ -7,3 +7,4 @@ docker:
   pkg:
     docker:
       use_upstream: package
+      daemon_config: {"dns": ["__IP_INT__"]}
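The daemon_config pillar is expected to end up in docker's daemon.json, assuming the docker Salt formula in use writes it there verbatim; 10.0.0.5 below is a hypothetical value for __IP_INT__. A quick sanity check after provisioning:

    $ cat /etc/docker/daemon.json
    {"dns": ["10.0.0.5"]}
    $ sudo systemctl restart docker   # new containers only pick up the DNS setting after a restart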
index 18f09af50329e4af62674e5f79393e8348e8a7c3..2c9a10cb521cbaa703e6bcde34df36f10bfabd70 100644 (file)
@@ -18,7 +18,7 @@ nginx:
         overwrite: true
         config:
           - server:
-            - listen: '__HOSTNAME_INT__:8004'
+            - listen: '__IP_INT__:8004'
             - server_name: api
             - root: /var/www/arvados-api/current/public
             - index:  index.html index.htm
index b7b75ab9c289e6e1c4f35d3edbb98b40d56bd382..dc0200b5ef98e0fc1ede9230408f9a15eeb978b9 100644 (file)
@@ -14,7 +14,7 @@ nginx:
           default: 1
           '127.0.0.0/8': 0
         upstream controller_upstream:
-          - server: '__HOSTNAME_INT__:8003  fail_timeout=10s'
+          - server: '__IP_INT__:8003  fail_timeout=10s'
 
   ### SITES
   servers:
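For reference, the nginx formula renders an upstream pillar like the one above into ordinary nginx config, roughly as below; 10.0.0.5 is a hypothetical substitution for __IP_INT__ and the conf path is an assumption:

    $ grep -A 2 'upstream controller_upstream' /etc/nginx/conf.d/controller.conf
    upstream controller_upstream {
        server 10.0.0.5:8003 fail_timeout=10s;
    }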
index 81d72aac74109531a073aea84b13be8d8d56c002..5a4f2492107aeb3a578fbfada9df626c6f46649d 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream keepproxy_upstream:
-          - server: '__HOSTNAME_INT__:25100 fail_timeout=10s'
+          - server: '__IP_INT__:25100 fail_timeout=10s'
 
   servers:
     managed:
index fcb56c994964bed01a55c6818ea82bae75a962ac..702dd68f6caf94f0854a7b8bca98e2b0287e6fc7 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream collections_downloads_upstream:
-          - server: '__HOSTNAME_INT__:9003 fail_timeout=10s'
+          - server: '__IP_INT__:9003 fail_timeout=10s'
 
   servers:
     managed:
index 1b21aaaeb6b1744545535ea4eab4ce02ece6ac44..e7d96d2b9df2e04c6843c771356fa6396c26b9a1 100644 (file)
@@ -12,7 +12,7 @@ nginx:
       ### STREAMS
       http:
         upstream webshell_upstream:
-          - server: '__HOSTNAME_INT__:4200 fail_timeout=10s'
+          - server: '__IP_INT__:4200 fail_timeout=10s'
 
   ### SITES
   servers:
index 7c4ff7835c580a78d6ffd91b391f194518f75f66..96074256f788d4ff374e5050e186285fa8bac1f9 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream websocket_upstream:
-          - server: '__HOSTNAME_INT__:8005 fail_timeout=10s'
+          - server: '__IP_INT__:8005 fail_timeout=10s'
 
   servers:
     managed:
index 9ed6e3b87aa61abcb1be03efb1a2a8e9ad2c2870..7bf095de3aa4c98a871813de3df216999719a6eb 100644 (file)
@@ -17,7 +17,7 @@ nginx:
       ### STREAMS
       http:
         upstream workbench_upstream:
-          - server: '__HOSTNAME_INT__:9000 fail_timeout=10s'
+          - server: '__IP_INT__:9000 fail_timeout=10s'
 
   ### SITES
   servers:
@@ -49,7 +49,7 @@ nginx:
         overwrite: true
         config:
           - server:
-            - listen: '__HOSTNAME_INT__:9000'
+            - listen: '__IP_INT__:9000'
             - server_name: workbench
             - root: /var/www/arvados-workbench/current/public
             - index:  index.html index.htm
index 53a9148cc0a7a832cd87d618d7576270554e30d4..a688f4f8c11535fdcaaac7b33eaaccf5cddd16c9 100644 (file)
@@ -7,12 +7,21 @@
 {%- from "arvados/map.jinja" import arvados with context %}
 {%- set tpldir = curr_tpldir %}
 
+# We need the external hostname to resolve to the internal IP for docker. We
+# tell docker to resolve via the local dnsmasq, which reads from /etc/hosts by
+# default.
+arvados_local_access_to_hostname_ext:
+  host.present:
+    - ip: __IP_INT__
+    - names:
+      - __HOSTNAME_EXT__
+
 arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
   host.present:
     - ip: 127.0.1.1
     - names:
       - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
-      # FIXME! This just works for our testings.
+      # FIXME! This just works for our testing.
       # Won't work if the cluster name != host name
       {%- for entry in [
           'api',
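Once the host.present state has run, the mapping can be checked directly on the host; 10.0.0.5 and arvados.example.com are hypothetical values for __IP_INT__ and __HOSTNAME_EXT__. dnsmasq reads /etc/hosts by default, and the docker daemon_config above points containers at it:

    $ getent hosts arvados.example.com
    10.0.0.5        arvados.example.com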
index b6929fb887ba6827a0979872ccee415a01d22c94..c5883b2e701668da870a5b848c299e5b0b379413 100644 (file)
@@ -101,7 +101,7 @@ arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_c
         {%- endfor %}
         DNS.8 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
         DNS.9 = '__HOSTNAME_EXT__'
-        DNS.10 = '__HOSTNAME_INT__'
+        DNS.10 = '__IP_INT__'
         CNF
 
         # The req
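Whether the regenerated snakeoil certificate actually carries the updated SAN entries can be checked with standard openssl tooling; the certificate path here is an assumption:

    $ openssl x509 -in /etc/ssl/certs/arvados-snakeoil-cert.pem -noout -text \
          | grep -A 1 'Subject Alternative Name'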
index de5af681ee8a888e753ca816f6c501cf41fa72e1..c07cc55ea80db43cb28ca973e207042e1b9f37ba 100644 (file)
@@ -19,7 +19,7 @@ DOMAIN="domain_fixme_or_this_wont_work"
 HOSTNAME_EXT=""
-# The internal hostname for the host. In the example files, only used in the
+# The internal IP address for the host. In the example files, only used in the
 # single_host/single_hostname example
-HOSTNAME_INT="127.0.1.1"
+IP_INT="127.0.1.1"
 # Host SSL port where you want to point your browser to access Arvados
 # Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
 # You can point it to another port if desired
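With the rename, local.params takes the host's internal address directly rather than a resolvable name. A sketch, with 10.0.0.5 as a hypothetical address found via iproute2 (output abbreviated):

    $ ip -4 -brief addr show eth0
    eth0             UP             10.0.0.5/24
    # then in local.params:
    IP_INT="10.0.0.5"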
index 961da49b6992231464993b6437413c1977ffdd0e..df3b95e0ccd630dadf617a25f074bd4fb5bf67a7 100755 (executable)
@@ -169,7 +169,7 @@ DOMAIN=""
 
 # Hostnames/IPs used for single-host deploys
 HOSTNAME_EXT=""
-HOSTNAME_INT="127.0.1.1"
+IP_INT="127.0.1.1"
 
 # Initial user setup
 INITIAL_USER=""
@@ -361,7 +361,7 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
        s#__CLUSTER__#${CLUSTER}#g;
        s#__DOMAIN__#${DOMAIN}#g;
        s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
-       s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+       s#__IP_INT__#${IP_INT}#g;
        s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
        s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
        s#__INITIAL_USER__#${INITIAL_USER}#g;
@@ -405,7 +405,7 @@ for f in $(ls "${SOURCE_TESTS_DIR}"/*); do
   sed "s#__CLUSTER__#${CLUSTER}#g;
        s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
        s#__DOMAIN__#${DOMAIN}#g;
-       s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+       s#__IP_INT__#${IP_INT}#g;
        s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
        s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g
        s#__INITIAL_USER__#${INITIAL_USER}#g;
@@ -426,7 +426,7 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
          s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;
          s#__DOMAIN__#${DOMAIN}#g;
          s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;
-         s#__HOSTNAME_INT__#${HOSTNAME_INT}#g;
+         s#__IP_INT__#${IP_INT}#g;
          s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;
          s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
          s#__INITIAL_USER__#${INITIAL_USER}#g;
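The substitution mechanics are unchanged; only the placeholder name differs. A minimal sketch of what one of these sed expressions does, with 10.0.0.5 standing in for a hypothetical ${IP_INT}:

    $ echo "'http://__IP_INT__:8003': {}" | sed "s#__IP_INT__#10.0.0.5#g"
    'http://10.0.0.5:8003': {}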
@@ -479,7 +479,6 @@ EOFPSLS
 # States, extra states
 if [ -d "${F_DIR}"/extra/extra ]; then
   SKIP_SNAKE_OIL="snakeoil_certs"
-
   if [[ "$DEV_MODE" = "yes" || "${SSL_MODE}" == "self-signed" ]] ; then
     # In dev mode, we create some snake oil certs that we'll
     # use as CUSTOM_CERTS, so we don't skip the states file.
@@ -520,6 +519,7 @@ if [ -z "${ROLES}" ]; then
   echo "    - postgres" >> ${S_DIR}/top.sls
   echo "    - docker.software" >> ${S_DIR}/top.sls
   echo "    - arvados" >> ${S_DIR}/top.sls
+  echo "    - extra.dns" >> ${S_DIR}/top.sls
 
   # Pillars
   echo "    - docker" >> ${P_DIR}/top.sls
@@ -548,8 +548,9 @@ if [ -z "${ROLES}" ]; then
               s#__CERT_KEY__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
       ${P_DIR}/nginx_${c}_configuration.sls
     done
-  else
-    # Use custom certs (either dev mode or prod)
+  elif [ "${SSL_MODE}" = "bring-your-own" ]; then
+    # Use custom "bring-your-own" certs (either dev mode or prod)
+    grep -q "custom_certs"       ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
     grep -q "extra_custom_certs" ${P_DIR}/top.sls || echo "    - extra_custom_certs" >> ${P_DIR}/top.sls
     # And add the certs in the custom_certs pillar
     echo "extra_custom_certs_dir: /srv/salt/certs" > ${P_DIR}/extra_custom_certs.sls
@@ -569,8 +570,8 @@ if [ -z "${ROLES}" ]; then
 else
   # If we add individual roles, make sure we add the repo first
   echo "    - arvados.repo" >> ${S_DIR}/top.sls
-  # We add the custom_certs state
-  grep -q "custom_certs"    ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
+  # We add the extra.custom_certs state
+  grep -q "extra.custom_certs"    ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
 
   # And we add the basic part for the certs pillar
   if [ "${SSL_MODE}" != "lets-encrypt" ]; then
index 5734837f3e501d53f2d6c3c69be7b48d5ecdbb37..cf43273a14d584b390079400b096f12ec1e2d683 100755 (executable)
@@ -37,10 +37,6 @@ fi
 
 echo "Arvados project uuid is '${project_uuid}'"
 
-echo "Uploading arvados/jobs' docker image to the project"
-VERSION="2.1.1"
-arv-keepdocker --pull arvados/jobs "${VERSION}" --project-uuid "${project_uuid}"
-
 # Create the initial user
 echo "Creating initial user '__INITIAL_USER__'"
 user_uuid=$(arv --format=uuid user list --filters '[["email", "=", "__INITIAL_USER_EMAIL__"], ["username", "=", "__INITIAL_USER__"]]')
@@ -69,4 +65,4 @@ echo "Switching to user '__INITIAL_USER__'"
 export ARVADOS_API_TOKEN="${user_api_token}"
 
 echo "Running test CWL workflow"
-cwl-runner --local --debug hasher-workflow.cwl hasher-workflow-job.yml
+cwl-runner --debug hasher-workflow.cwl hasher-workflow-job.yml
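Dropping --local means the workflow runner itself is submitted as a container on the cluster instead of running on the test host, which is what exercises the in-docker DNS fix (this assumes cwl-runner resolves to arvados-cwl-runner in the test environment). One way to confirm afterwards that runs landed on the cluster, assuming a configured arv CLI:

    $ arv container_request list --limit 5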