Merge branch '16774-keep-web-errors' refs #16774
authorPeter Amstutz <peter.amstutz@curii.com>
Mon, 30 Nov 2020 16:03:53 +0000 (11:03 -0500)
committerPeter Amstutz <peter.amstutz@curii.com>
Mon, 30 Nov 2020 16:03:53 +0000 (11:03 -0500)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz@curii.com>

57 files changed:
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/users_controller.rb
apps/workbench/app/views/users/profile.html.erb
build/build-dev-docker-jobs-image.sh
build/run-build-docker-jobs-image.sh
build/run-build-packages-python-and-ruby.sh
build/run-library.sh
doc/admin/config.html.textile.liquid
doc/admin/federation.html.textile.liquid
doc/admin/upgrading.html.textile.liquid
doc/api/keep-s3.html.textile.liquid
doc/api/methods.html.textile.liquid
doc/api/methods/jobs.html.textile.liquid
doc/api/methods/pipeline_templates.html.textile.liquid
doc/install/install-keep-web.html.textile.liquid
doc/install/salt-single-host.html.textile.liquid
doc/install/salt-vagrant.html.textile.liquid
doc/user/tutorials/wgs-tutorial.html.textile.liquid
lib/boot/postgresql.go
lib/controller/fed_containers.go
lib/controller/federation/conn.go
lib/controller/handler.go
lib/controller/integration_test.go
lib/controller/rpc/conn_test.go
lib/crunchrun/background.go
lib/crunchrun/crunchrun.go
lib/crunchrun/crunchrun_test.go
lib/ctrlctx/db.go
lib/install/deps.go
lib/mount/command.go
sdk/cwl/test_with_arvbox.sh
sdk/go/arvadostest/db.go
sdk/go/blockdigest/blockdigest_test.go
sdk/go/keepclient/hashcheck.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
services/api/app/models/api_client_authorization.rb
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/keep-web/s3.go
services/keep-web/s3_test.go
services/keep-web/server_test.go
tools/salt-install/Vagrantfile
tools/salt-install/provision.sh
tools/salt-install/single_host/arvados.sls
tools/salt-install/single_host/docker.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_api_configuration.sls
tools/salt-install/single_host/nginx_controller_configuration.sls
tools/salt-install/single_host/nginx_keepproxy_configuration.sls
tools/salt-install/single_host/nginx_keepweb_configuration.sls
tools/salt-install/single_host/nginx_webshell_configuration.sls
tools/salt-install/single_host/nginx_websocket_configuration.sls
tools/salt-install/single_host/nginx_workbench_configuration.sls
tools/salt-install/tests/hasher-workflow-job.yml [new file with mode: 0644]
tools/salt-install/tests/hasher-workflow.cwl [new file with mode: 0644]
tools/salt-install/tests/hasher.cwl [new file with mode: 0644]
tools/salt-install/tests/run-test.sh [new file with mode: 0755]
tools/salt-install/tests/test.txt [new file with mode: 0644]

index cf4bfa8c5400b56543e6cfe7174091b489095b74..6d139cd5fdb207ad872ec700225f9ae7b75b9047 100644 (file)
@@ -760,7 +760,7 @@ class ApplicationController < ActionController::Base
     if current_user && !profile_config.empty?
       current_user_profile = current_user.prefs[:profile]
       profile_config.each do |k, entry|
-        if entry['Required']
+        if entry[:Required]
           if !current_user_profile ||
              !current_user_profile[k] ||
              current_user_profile[k].empty?
index 27fc12bf4c9fc7d3239131f96e93d114588bad31..21ea7a8e693e00ccd5c4599275b44fc33b1e9cdb 100644 (file)
@@ -39,6 +39,18 @@ class UsersController < ApplicationController
 
   def profile
     params[:offer_return_to] ||= params[:return_to]
+
+    # In a federation situation, when you get a user record using
+    # "current user of token" it can fetch a stale user record from
+    # the local cluster. So even if profile settings were just written
+    # to the user record on the login cluster (because the user just
+    # filled out the profile), those profile settings may not appear
+    # in the "current user" response because it is returning a cached
+    # record from the local cluster.
+    #
+    # In this case, explicitly fetching the user record forces it to get a
+    # fresh record from the login cluster.
+    Thread.current[:user] = User.find(current_user.uuid)
   end
 
   def activity
index 6692196dabf717e40defd77e9c6c0c2538d3c393..caa22bda11cd0925fb5a9a98636860ad8827c61d 100644 (file)
@@ -68,29 +68,30 @@ SPDX-License-Identifier: AGPL-3.0 %>
               </div>
 
               <% profile_config.kind_of?(Array) && profile_config.andand.each do |entry| %>
-                <% if entry['Key'] %>
+                <% if entry[:Key] %>
                   <%
                       show_save_button = true
-                      label = entry['Required'] ? '* ' : ''
-                      label += entry['FormFieldTitle']
-                      value = current_user_profile[entry['Key'].to_sym] if current_user_profile
+                      label = entry[:Required] ? '* ' : ''
+                      label += entry[:FormFieldTitle]
+                      value = current_user_profile[entry[:Key].to_sym] if current_user_profile
                   %>
                   <div class="form-group">
-                    <label for="<%=entry['Key']%>"
+                    <label for="<%=entry[:Key]%>"
                            class="col-sm-3 control-label"
-                           style=<%="color:red" if entry['Required']&&(!value||value.empty?)%>> <%=label%>
+                           style=<%="color:red" if entry[:Required]&&(!value||value.empty?)%>> <%=label%>
                     </label>
-                    <% if entry['Type'] == 'select' %>
+                    <% if entry[:Type] == 'select' %>
                       <div class="col-sm-8">
-                        <select class="form-control" name="user[prefs][profile][<%=entry['Key']%>]">
-                          <% entry['Options'].each do |option, _| %>
+                        <select class="form-control" name="user[prefs][profile][<%=entry[:Key]%>]">
+                          <% entry[:Options].each do |option, _| %>
+                           <% option = option.to_s %>
                             <option value="<%=option%>" <%='selected' if option==value%>><%=option%></option>
                           <% end %>
                         </select>
                       </div>
                     <% else %>
                       <div class="col-sm-8">
-                        <input type="text" class="form-control" name="user[prefs][profile][<%=entry['Key']%>]" placeholder="<%=entry['FormFieldDescription']%>" value="<%=value%>" ></input>
+                        <input type="text" class="form-control" name="user[prefs][profile][<%=entry[:Key]%>]" placeholder="<%=entry[:FormFieldDescription]%>" value="<%=value%>" ></input>
                       </div>
                     <% end %>
                   </div>
index 0e570d5f31838037160f5797f80e1fc0cc7048e4..af838d68e8c7e33ac5f7d1d0f10e52fa7b95b47f 100755 (executable)
@@ -69,10 +69,10 @@ fi
 
 . build/run-library.sh
 
+# This defines python_sdk_version and cwl_runner_version with python-style
+# package suffixes (.dev/rc)
 calculate_python_sdk_cwl_package_versions
 
-cwl_runner_version=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
-
 set -x
 docker build --no-cache --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool --build-arg pythoncmd=$py --build-arg pipcmd=$pipcmd -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
 echo arv-keepdocker arvados/jobs $cwl_runner_version
index 59914a2ee9dcdeb78a7de4eb9d59c7716342ff05..07577182166ed2a35a8a16eceabee47ffb1b7aa5 100755 (executable)
@@ -139,41 +139,47 @@ if [[ -z "$ARVADOS_BUILDING_VERSION" ]] && ! [[ -z "$version_tag" ]]; then
        ARVADOS_BUILDING_ITERATION="1"
 fi
 
+# This defines python_sdk_version and cwl_runner_version with python-style
+# package suffixes (.dev/rc)
 calculate_python_sdk_cwl_package_versions
 
-echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
-
-if [[ "${python_sdk_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
-       python_sdk_version="${python_sdk_version}-1"
-else
-       python_sdk_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
+if [[ -z "$cwl_runner_version" ]]; then
+  echo "ERROR: cwl_runner_version is empty";
+  exit 1
 fi
 
-# What we use to tag the Docker image.  For development and release
-# candidate packages, the OS package has a "~dev" or "~rc" suffix, but
-# Python requires a ".dev" or "rc" suffix.  Arvados-cwl-runner will be
-# expecting the Python-compatible version string when it tries to pull
-# the Docker image, but --build-arg is expecting the OS package
+echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+
+# For development and release candidate packages, the OS package has a "~dev"
+# or "~rc" suffix, but Python requires a ".dev" or "rc" suffix.
+#
+# Arvados-cwl-runner will be expecting the Python-compatible version string
+# when it tries to pull the Docker image, so we use that to tag the Docker
+# image.
+#
+# The --build-arg docker invocation arguments are expecting the OS package
 # version.
-cwl_runner_version_tag=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+python_sdk_version_os=$(echo -n $python_sdk_version | sed s/.dev/~dev/g | sed s/rc/~rc/g)
+cwl_runner_version_os=$(echo -n $cwl_runner_version | sed s/.dev/~dev/g | sed s/rc/~rc/g)
 
-if [[ -z "$cwl_runner_version_tag" ]]; then
-  echo "ERROR: cwl_runner_version_tag is empty";
-  exit 1
+if [[ "${python_sdk_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
+       python_sdk_version_os="${python_sdk_version_os}-1"
+else
+       python_sdk_version_os="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
 fi
 
-if [[ "${cwl_runner_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
-       cwl_runner_version="${cwl_runner_version}-1"
+if [[ "${cwl_runner_version_os}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
+       cwl_runner_version_os="${cwl_runner_version_os}-1"
 else
-       cwl_runner_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
+       cwl_runner_version_os="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
 fi
 
 cd docker/jobs
 docker build $NOCACHE \
-       --build-arg python_sdk_version=${python_sdk_version} \
-       --build-arg cwl_runner_version=${cwl_runner_version} \
+       --build-arg python_sdk_version=${python_sdk_version_os} \
+       --build-arg cwl_runner_version=${cwl_runner_version_os} \
        --build-arg repo_version=${REPO} \
-       -t arvados/jobs:$cwl_runner_version_tag .
+       -t arvados/jobs:$cwl_runner_version .
 
 ECODE=$?
 
@@ -207,7 +213,7 @@ else
         ## 20150526 nico -- *sometimes* dockerhub needs re-login
         ## even though credentials are already in .dockercfg
         docker login -u arvados
-        docker_push arvados/jobs:$cwl_runner_version_tag
+        docker_push arvados/jobs:$cwl_runner_version
         title "upload arvados images finished (`timer`)"
     else
         title "upload arvados images SKIPPED because no --upload option set (`timer`)"
index f3b7564d714f41492c8ff55933707a98c99086fb..f255307607c492468f567e7f649c6dcd0818b919 100755 (executable)
@@ -6,7 +6,6 @@
 COLUMNS=80
 
 . `dirname "$(readlink -f "$0")"`/run-library.sh
-#. `dirname "$(readlink -f "$0")"`/libcloud-pin.sh
 
 read -rd "\000" helpmessage <<EOF
 $(basename $0): Build Arvados Python packages and Ruby gems
@@ -50,6 +49,16 @@ gem_wrapper() {
   title "End of $gem_name gem build (`timer`)"
 }
 
+handle_python_package () {
+  # This function assumes the current working directory is the python package directory
+  if [ -n "$(find dist -name "*-$(nohash_version_from_git).tar.gz" -print -quit)" ]; then
+    echo "This package doesn't need rebuilding."
+    return
+  fi
+  # Make sure only to use sdist - that's the only format pip can deal with (sigh)
+  python3 setup.py $DASHQ_UNLESS_DEBUG sdist
+}
+
 python_wrapper() {
   local package_name="$1"; shift
   local package_directory="$1"; shift
index 1716cf3706240323ef96486398634cd6084d449a..6f95a8f4bfd8cb9736a5b9fba6c8076005ce2de3 100755 (executable)
@@ -79,16 +79,6 @@ calculate_python_sdk_cwl_package_versions() {
   cwl_runner_version=$(cd sdk/cwl && python3 arvados_version.py)
 }
 
-handle_python_package () {
-  # This function assumes the current working directory is the python package directory
-  if [ -n "$(find dist -name "*-$(nohash_version_from_git).tar.gz" -print -quit)" ]; then
-    # This package doesn't need rebuilding.
-    return
-  fi
-  # Make sure only to use sdist - that's the only format pip can deal with (sigh)
-  python setup.py $DASHQ_UNLESS_DEBUG sdist
-}
-
 handle_ruby_gem() {
     local gem_name="$1"; shift
     local gem_version="$(nohash_version_from_git)"
@@ -690,9 +680,9 @@ fpm_build_virtualenv () {
     done
   fi
 
-  # the python-arvados-cwl-runner package comes with cwltool, expose that version
-  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/python2.7/dist/python-arvados-cwl-runner/bin/cwltool" ]]; then
-    COMMAND_ARR+=("usr/share/python2.7/dist/python-arvados-cwl-runner/bin/cwltool=/usr/bin/")
+  # the python3-arvados-cwl-runner package comes with cwltool, expose that version
+  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/python-arvados-cwl-runner/bin/cwltool" ]]; then
+    COMMAND_ARR+=("usr/share/$python/dist/python-arvados-cwl-runner/bin/cwltool=/usr/bin/")
   fi
 
   COMMAND_ARR+=(".")
index 316b6f48b7f567d8e92aefe3a9926ff1110b680c..745cd2853265a096ad99b55bd6c53124feb22871 100644 (file)
@@ -10,7 +10,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-The master Arvados configuration is stored at @/etc/arvados/config.yml@
+The Arvados configuration is stored at @/etc/arvados/config.yml@
 
 See "Migrating Configuration":config-migration.html for information about migrating from legacy component-specific configuration files.
 
index eb4a451a891e4151b016f5d9d7edf6ae0d191c90..d6ffb48f4143d9e7bfec42d80da24aaa9bc8c343 100644 (file)
@@ -57,9 +57,9 @@ Clusters:
       LoginCluster: clsr1
 </pre>
 
-The @LoginCluster@ configuration redirects all user logins to the LoginCluster, and the LoginCluster will issue API tokens which will be accepted by the federation.  Users are activated or deactivated across the entire federation based on their status on the master cluster.
+The @LoginCluster@ configuration redirects all user logins to the LoginCluster, and the LoginCluster will issue API tokens which will be accepted by the federation.  Users are activated or deactivated across the entire federation based on their status on the login cluster.
 
-Note: tokens issued by the master cluster need to be periodically re-validated when used on other clusters in the federation.  The period between revalidation attempts is configured with @Login.RemoteTokenRefresh@.  The default is 5 minutes.  A longer period reduces overhead from validating tokens, but means it may take longer for other clusters to notice when a token has been revoked or a user has changed status (being activated/deactivated, admin flag changed).
+Note: tokens issued by the login cluster need to be periodically re-validated when used on other clusters in the federation.  The period between revalidation attempts is configured with @Login.RemoteTokenRefresh@.  The default is 5 minutes.  A longer period reduces overhead from validating tokens, but means it may take longer for other clusters to notice when a token has been revoked or a user has changed status (being activated/deactivated, admin flag changed).
 
 To migrate users of existing clusters with separate user databases to use a single LoginCluster, use "arv-federation-migrate":merge-remote-account.html .
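For example, a satellite cluster @clsr2@ that delegates logins to @clsr1@ and tolerates a longer revalidation interval might use a configuration sketch like the following (the @15m@ value is illustrative, not a recommendation):

<pre>
Clusters:
  clsr2:
    Login:
      LoginCluster: clsr1
      RemoteTokenRefresh: 15m
</pre>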
 
index e8cde5acec8fe41761e016f963a1ac8356588708..3f622112e95391d5364be1e16f211729b2c4a150 100644 (file)
@@ -35,7 +35,7 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
-h2(#master). development master (as of 2020-10-28)
+h2(#main). development main (as of 2020-10-28)
 
 "Upgrading from 2.1.0":#v2_1_0
 
index 2cae817613699a4ba08467742c736e4827fa058e..bee91516bc12fc61e87a51b603361372ad64e358 100644 (file)
@@ -21,7 +21,11 @@ To access Arvados S3 using an S3 client library, you must tell it to use the URL
 
 The "bucket name" is an Arvados collection uuid, portable data hash, or project uuid.
 
-The bucket name must be encoded as the first path segment of every request.  This is what the S3 documentation calls "Path-Style Requests".
+Path-style and virtual host-style requests are supported.
+* A path-style request uses the hostname indicated by @Services.WebDAVDownload.ExternalURL@, with the bucket name in the first path segment: @https://download.example.com/zzzzz-4zz18-asdfgasdfgasdfg/@.
+* A virtual host-style request uses the hostname pattern indicated by @Services.WebDAV.ExternalURL@, with a bucket name in place of the leading @*@: @https://zzzzz-4zz18-asdfgasdfgasdfg.collections.example.com/@.
+
+If you have wildcard DNS, TLS, and routing set up, an S3 client configured with endpoint @collections.example.com@ should work regardless of which request style it uses.
 
 h3. Supported Operations
 
@@ -70,5 +74,16 @@ h3. Authorization mechanisms
 
 Keep-web accepts AWS Signature Version 4 (AWS4-HMAC-SHA256) as well as the older V2 AWS signature.
 
-* If your client uses V4 signatures exclusively: use the Arvados token's UUID part as AccessKey, and its secret part as SecretKey.  This is preferred.
-* If your client uses V2 signatures, or a combination of V2 and V4, or the Arvados token UUID is unknown: use the secret part of the Arvados token for both AccessKey and SecretKey.
+If your client uses V4 signatures exclusively _and_ your Arvados token was issued by the same cluster you are connecting to, you can use the Arvados token's UUID part as your S3 Access Key, and its secret part as your S3 Secret Key. This is preferred, where applicable.
+
+Example using cluster @zzzzz@:
+* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
+* Access Key: @zzzzz-gj3su-yyyyyyyyyyyyyyy@
+* Secret Key: @xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
+
+In all other cases, replace every @/@ character in your Arvados token with @_@, and use the resulting string as both Access Key and Secret Key.
+
+Example using a cluster other than @zzzzz@ _or_ an S3 client that uses V2 signatures:
+* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
+* Access Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
+* Secret Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@
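To make the request styles and key derivation above concrete, here is a minimal Go sketch using the AWS SDK for Go (aws-sdk-go). The endpoint, bucket, and token values are the placeholder examples from this page, and the path-style and credential choices follow the rules above; treat it as an illustration, not a supported client.

<pre><code>package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// General-case credentials: replace every "/" in the v2 token
	// with "_", and use the result as both Access Key and Secret Key.
	token := "v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
	key := strings.Replace(token, "/", "_", -1)

	sess := session.Must(session.NewSession(&aws.Config{
		// Path-style: hostname from Services.WebDAVDownload.ExternalURL.
		Endpoint:         aws.String("https://download.example.com"),
		Region:           aws.String("us-east-1"), // placeholder; required by the SDK
		Credentials:      credentials.NewStaticCredentials(key, key, ""),
		S3ForcePathStyle: aws.Bool(true), // bucket name in the first path segment
	}))

	// The "bucket" is a collection UUID, portable data hash, or project UUID.
	out, err := s3.New(sess).ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("zzzzz-4zz18-asdfgasdfgasdfg"),
	})
	fmt.Println(out, err)
}
</code></pre>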
index 872a1bca7149acb22f891d243a1be316d4d7a9c8..ae96d0a3b5872dd17f1d5e91bec856ad7845c6b3 100644 (file)
@@ -103,7 +103,7 @@ table(table table-bordered table-condensed).
 |@=@, @!=@|string, number, timestamp, or null|Equality comparison|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@ @["tail_uuid","!=",null]@|
 |@<@, @<=@, @>=@, @>@|string, number, or timestamp|Ordering comparison|@["script_version",">","123"]@|
 |@like@, @ilike@|string|SQL pattern match.  Single character match is @_@ and wildcard is @%@. The @ilike@ operator is case-insensitive|@["script_version","like","d00220fb%"]@|
-|@in@, @not in@|array of strings|Set membership|@["script_version","in",["master","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
+|@in@, @not in@|array of strings|Set membership|@["script_version","in",["main","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
 |@is_a@|string|Arvados object type|@["head_uuid","is_a","arvados#collection"]@|
 |@exists@|string|Test if a subproperty is present.|@["properties","exists","my_subproperty"]@|
 
index 13fa8387679c533184f0686d31681731a7752eb2..aa7a58898a58dcb998f0de202db907a97843e5bf 100644 (file)
@@ -57,7 +57,7 @@ See "Specifying Git versions":#script_version below for more detail about accept
 
 h3(#script_version). Specifying Git versions
 
-The script_version attribute and arvados_sdk_version runtime constraint are typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts Git versions in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example, <code>HEAD@{1}</code> refers to the local reflog, and @origin/master@ typically refers to a remote branch: neither is likely to work as desired if given as a Git version.
+The script_version attribute and arvados_sdk_version runtime constraint are typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts Git versions in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example, <code>HEAD@{1}</code> refers to the local reflog, and @origin/main@ typically refers to a remote branch: neither is likely to work as desired if given as a Git version.
 
 h3. Runtime constraints
 
@@ -138,14 +138,14 @@ notextile. <div class="spaced-out">
 
 h4. Examples
 
-Run the script "crunch_scripts/hash.py" in the repository "you" using the "master" commit.  Arvados should re-use a previous job if the script_version of the previous job is the same as the current "master" commit. This works irrespective of whether the previous job was submitted using the name "master", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
+Run the script "crunch_scripts/hash.py" in the repository "you" using the "main" commit.  Arvados should re-use a previous job if the script_version of the previous job is the same as the current "main" commit. This works irrespective of whether the previous job was submitted using the name "main", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
 
 <notextile><pre>
 {
   "job": {
     "script": "hash.py",
     "repository": "<b>you</b>/<b>you</b>",
-    "script_version": "master",
+    "script_version": "main",
     "script_parameters": {
       "input": "c1bad4b39ca5a924e481008009d94e32+210"
     }
@@ -170,14 +170,14 @@ Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvado
 }
 </pre></notextile>
 
-Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "master" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "master" commit.
+Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "main" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "main" commit.
 
 <notextile><pre>
 {
   "job": {
     "script": "hash.py",
     "repository": "<b>you</b>/<b>you</b>",
-    "script_version": "master",
+    "script_version": "main",
     "script_parameters": {
       "input": "c1bad4b39ca5a924e481008009d94e32+210"
     }
@@ -195,7 +195,7 @@ The same behavior, using filters:
   "job": {
     "script": "hash.py",
     "repository": "<b>you</b>/<b>you</b>",
-    "script_version": "master",
+    "script_version": "main",
     "script_parameters": {
       "input": "c1bad4b39ca5a924e481008009d94e32+210"
     }
@@ -208,14 +208,14 @@ The same behavior, using filters:
 }
 </pre></notextile>
 
-Run the script "crunch_scripts/monte-carlo.py" in the repository "you/you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
+Run the script "crunch_scripts/monte-carlo.py" in the repository "you/you" using the current "main" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
 
 <notextile><pre>
 {
   "job": {
     "script": "monte-carlo.py",
     "repository": "<b>you</b>/<b>you</b>",
-    "script_version": "master",
+    "script_version": "main",
     "nondeterministic": true,
     "script_parameters": {
       "input": "c1bad4b39ca5a924e481008009d94e32+210"
index 40297aa05199b77ac317b8afc94843961b03702d..141072c51c451770830a9d22bd0fdd4185a826d9 100644 (file)
@@ -77,7 +77,7 @@ This is a pipeline named "Filter MD5 hash values" with two components, "do_hash"
     "do_hash": {
       "script": "hash.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": {
         "input": {
           "required": true,
@@ -90,7 +90,7 @@ This is a pipeline named "Filter MD5 hash values" with two components, "do_hash"
     "filter": {
       "script": "0-filter.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": {
         "input": {
           "output_of": "do_hash"
@@ -110,13 +110,13 @@ This pipeline consists of three components.  The components "thing1" and "thing2
     "cat_in_the_hat": {
       "script": "cat.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": { }
     },
     "thing1": {
       "script": "thing1.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": {
         "input": {
           "output_of": "cat_in_the_hat"
@@ -126,7 +126,7 @@ This pipeline consists of three components.  The components "thing1" and "thing2
     "thing2": {
       "script": "thing2.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": {
         "input": {
           "output_of": "cat_in_the_hat"
@@ -146,19 +146,19 @@ This pipeline consists of three components.  The component "cleanup" depends on
     "thing1": {
       "script": "thing1.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": { }
     },
     "thing2": {
       "script": "thing2.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": { }
     },
     "cleanup": {
       "script": "cleanup.py",
       "repository": "<b>you</b>/<b>you</b>",
-      "script_version": "master",
+      "script_version": "main",
       "script_parameters": {
         "mess1": {
           "output_of": "thing1"
index 24f37bfb4f8ee25b3b32b691624e06586f9b42d1..b797c1958e4102cf4551000ed1d691d887e1e682 100644 (file)
@@ -20,7 +20,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#introduction). Introduction
 
-The Keep-web server provides read/write HTTP (WebDAV) access to files stored in Keep.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":http://godoc.org/github.com/curoverse/arvados/services/keep-web for more detail.
+The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":http://godoc.org/github.com/curoverse/arvados/services/keep-web for more detail.
 
 h2(#dns). Configure DNS
 
@@ -61,6 +61,8 @@ Collections can be served from their own subdomain:
 </code></pre>
 </notextile>
 
+This option is preferred if you plan to access Keep using third-party S3 client software, because it accommodates S3 virtual host-style requests and path-style requests without any special client configuration.
+
 h4. Under the main domain
 
 Alternately, they can go under the main domain by including @--@:
index 139366179a4306b54da1bbef688ca28039d41b35..fb41d59ee2a67745ecb2b1ef163f21a2b79cd44b 100644 (file)
@@ -11,9 +11,9 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 # "Install Saltstack":#saltstack
 # "Single host install using the provision.sh script":#single_host
-# "Local testing Arvados in a Vagrant box":#vagrant
 # "DNS configuration":#final_steps
 # "Initial user and login":#initial_user
+# "Test the installed cluster running a simple workflow":#test_install
 
 h2(#saltstack). Install Saltstack
 
@@ -84,3 +84,95 @@ Assuming you didn't change these values in the @provision.sh@ script, the initia
 * User: 'admin'
 * Password: 'password'
 * Email: 'admin@arva2.arv.local'
+
+h2(#test_install). Test the installed cluster running a simple workflow
+
+The @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory. To run it, change to that directory and run:
+
+<notextile>
+<pre><code>cd /tmp/cluster_tests
+./run-test.sh
+</code></pre>
+</notextile>
+
+It will create a test user, upload a small workflow, and run it. If everything goes OK, the output should be similar to this (some output was shortened for clarity):
+
+<notextile>
+<pre><code>Creating Arvados Standard Docker Images project
+Arvados project uuid is 'arva2-j7d0g-0prd8cjlk6kfl7y'
+{
+ ...
+ "uuid":"arva2-o0j2j-n4zu4cak5iifq2a",
+ "owner_uuid":"arva2-tpzed-000000000000000",
+ ...
+}
+Uploading arvados/jobs' docker image to the project
+2.1.1: Pulling from arvados/jobs
+8559a31e96f4: Pulling fs layer
+...
+Status: Downloaded newer image for arvados/jobs:2.1.1
+docker.io/arvados/jobs:2.1.1
+2020-11-23 21:43:39 arvados.arv_put[32678] INFO: Creating new cache file at /home/vagrant/.cache/arvados/arv-put/c59256eda1829281424c80f588c7cc4d
+2020-11-23 21:43:46 arvados.arv_put[32678] INFO: Collection saved as 'Docker image arvados jobs:2.1.1 sha256:0dd50'
+arva2-4zz18-1u5pvbld7cvxuy2
+Creating initial user ('admin')
+Setting up user ('admin')
+{
+ "items":[
+  {
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "uuid":"arva2-o0j2j-1ownrdne0ok9iox"
+  },
+  {
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "uuid":"arva2-o0j2j-1zbeyhcwxc1tvb7"
+  },
+  {
+   ...
+   "email":"admin@arva2.arv.local",
+   ...
+   "owner_uuid":"arva2-tpzed-000000000000000",
+   ...
+   "username":"admin",
+   "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+   ...
+  }
+ ],
+ "kind":"arvados#HashList"
+}
+Activating user 'admin'
+{
+ ...
+ "email":"admin@arva2.arv.local",
+ ...
+ "username":"admin",
+ "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
+ ...
+}
+Running test CWL workflow
+INFO /usr/bin/cwl-runner 2.1.1, arvados-python-client 2.1.1, cwltool 3.0.20200807132242
+INFO Resolved 'hasher-workflow.cwl' to 'file:///tmp/cluster_tests/hasher-workflow.cwl'
+...
+INFO Using cluster arva2 (https://arva2.arv.local:8443/)
+INFO Upload local files: "test.txt"
+INFO Uploaded to ea34d971b71d5536b4f6b7d6c69dc7f6+50 (arva2-4zz18-c8uvwqdry4r8jao)
+INFO Using collection cache size 256 MiB
+INFO [container hasher-workflow.cwl] submitted container_request arva2-xvhdp-v1bkywd58gyocwm
+INFO [container hasher-workflow.cwl] arva2-xvhdp-v1bkywd58gyocwm is Final
+INFO Overall process status is success
+INFO Final output collection d6c69a88147dde9d52a418d50ef788df+123
+{
+    "hasher_out": {
+        "basename": "hasher3.md5sum.txt",
+        "class": "File",
+        "location": "keep:d6c69a88147dde9d52a418d50ef788df+123/hasher3.md5sum.txt",
+        "size": 95
+    }
+}
+INFO Final process status is success
+</code></pre>
+</notextile>
index 41f32e51cd2f7226253208d149445eeb41d8fd2c..d9aa791f0bad3a5173d6aa9762f2e56359534f5b 100644 (file)
@@ -12,6 +12,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Vagrant":#vagrant
 # "DNS configuration":#final_steps
 # "Initial user and login":#initial_user
+# "Test the installed cluster running a simple workflow":#test_install
 
 h2(#vagrant). Vagrant
 
@@ -71,3 +72,19 @@ Assuming you didn't change the defaults, the initial credentials are:
 * User: 'admin'
 * Password: 'password'
 * Email: 'admin@arva2.arv.local'
+
+h2(#test_install). Test the installed cluster running a simple workflow
+
+As documented on the <a href="{{ site.baseurl }}/install/salt-single-host.html">Single Host installation</a> page, you can run a test workflow to verify that the installation finished correctly. To do so, follow these steps:
+
+<notextile>
+<pre><code>vagrant ssh</code></pre>
+</notextile>
+
+and once in the instance:
+
+<notextile>
+<pre><code>cd /tmp/cluster_tests
+./run-test.sh
+</code></pre>
+</notextile>
index cd4d1cc715e0f8dda3fdbe362ad096173c516f4e..a68d7ca21eeecfd58089d9dbaab1d481fd1d1b6f 100644 (file)
@@ -245,9 +245,9 @@ node.json gives a high level overview about the instance such as name, price, an
 ** Contains information about resource consumption (RAM, CPU, disk, network) on the node while it was running
 This is different from the log crunchstat.txt because it includes resource consumption of Arvados components that run on the node outside the container, such as crunch-run and other processes related to the Keep file system.
 
-For the highest level logs, the logs are tracking the container that ran the @arvados-cwl-runner@ process which you can think of as the “mastermind” behind tracking which parts of the CWL workflow need to be run when, which have been run already, what order they need to be run, which can be run simultaneously, and so forth and then sending out the related container requests.  Each step then has their own logs related to containers running a CWL step of the workflow including a log of standard error that contains the standard error of the code run in that CWL step.  Those logs can be found by expanding the steps and clicking on the link to the log collection.
+For the highest level logs, the logs are tracking the container that ran the @arvados-cwl-runner@ process which you can think of as the “workflow runner”. It tracks which parts of the CWL workflow need to be run when, which have been run already, what order they need to be run, which can be run simultaneously, and so forth and then creates the necessary container requests.  Each step has its own logs related to containers running a CWL step of the workflow including a log of standard error that contains the standard error of the code run in that CWL step.  Those logs can be found by expanding the steps and clicking on the link to the log collection.
 
-Let’s take a peek at a few of these logs to get you more familiar with them.  First, we can look at the @stderr.txt@ of the highest level process.  Again recall this should be of the “mastermind” @arvados-cwl-runner@ process.  You can click on the log to download it to your local machine, and when you look at the contents - you should see something like the following...
+Let’s take a peek at a few of these logs to get you more familiar with them.  First, we can look at the @stderr.txt@ of the highest level process.  Again recall this should be of the “workflow runner” @arvados-cwl-runner@ process.  You can click on the log to download it to your local machine, and when you look at the contents - you should see something like the following...
 
 <pre><code>2020-06-22T20:30:04.737703197Z INFO /usr/bin/arvados-cwl-runner 2.0.3, arvados-python-client 2.0.3, cwltool 1.0.20190831161204
 2020-06-22T20:30:04.743250012Z INFO Resolved '/var/lib/cwl/workflow.json#main' to 'file:///var/lib/cwl/workflow.json#main'
index 34ccf04a88dbd68a7822cc75b13da972e32844ee..7661c6b58795e623e3ebac21e99b100b2c474d34 100644 (file)
@@ -61,7 +61,7 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
                if err != nil {
                        return fmt.Errorf("user.Lookup(\"postgres\"): %s", err)
                }
-               postgresUid, err := strconv.Atoi(postgresUser.Uid)
+               postgresUID, err := strconv.Atoi(postgresUser.Uid)
                if err != nil {
                        return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric uid?: %q", postgresUser.Uid)
                }
@@ -77,7 +77,7 @@ func (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Superviso
                if err != nil {
                        return err
                }
-               err = os.Chown(datadir, postgresUid, 0)
+               err = os.Chown(datadir, postgresUID, 0)
                if err != nil {
                        return err
                }
index 51f243e69e48d106bec6239c8cc8a66597dff60e..fd4f0521bcdcf0b0258cae415a2b63cc02043cd5 100644 (file)
@@ -66,14 +66,14 @@ func remoteContainerRequestCreate(
 
        crString, ok := request["container_request"].(string)
        if ok {
-               var crJson map[string]interface{}
-               err := json.Unmarshal([]byte(crString), &crJson)
+               var crJSON map[string]interface{}
+               err := json.Unmarshal([]byte(crString), &crJSON)
                if err != nil {
                        httpserver.Error(w, err.Error(), http.StatusBadRequest)
                        return true
                }
 
-               request["container_request"] = crJson
+               request["container_request"] = crJSON
        }
 
        containerRequest, ok := request["container_request"].(map[string]interface{})
index 986faa7b05e33e325a6bc1c15b4283ec6d79d9ed..130368124cdd904a40ceb3938122181594c26804 100644 (file)
@@ -462,7 +462,18 @@ func (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions)
        if options.BypassFederation {
                return conn.local.UserUpdate(ctx, options)
        }
-       return conn.chooseBackend(options.UUID).UserUpdate(ctx, options)
+       resp, err := conn.chooseBackend(options.UUID).UserUpdate(ctx, options)
+       if err != nil {
+               return resp, err
+       }
+       if !strings.HasPrefix(options.UUID, conn.cluster.ClusterID) {
+               // Copy the updated user record to the local cluster
+               err = conn.batchUpdateUsers(ctx, arvados.ListOptions{}, []arvados.User{resp})
+               if err != nil {
+                       return arvados.User{}, err
+               }
+       }
+       return resp, err
 }
 
 func (conn *Conn) UserUpdateUUID(ctx context.Context, options arvados.UpdateUUIDOptions) (arvados.User, error) {
index 25bba558dc68096796143b1d9bd4483d07a6f44f..6669e020fdb9046abdceca72709348526af663c5 100644 (file)
@@ -25,6 +25,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/health"
        "git.arvados.org/arvados.git/sdk/go/httpserver"
        "github.com/jmoiron/sqlx"
+       // sqlx needs lib/pq to talk to PostgreSQL
        _ "github.com/lib/pq"
 )
 
index 3da01ca6823562a6b13509adf58b9e621f704dec..6ac8c2e338205ad0fbcf0ae30e019b49b2705116 100644 (file)
@@ -8,6 +8,7 @@ import (
        "bytes"
        "context"
        "encoding/json"
+       "fmt"
        "io"
        "io/ioutil"
        "math"
@@ -15,7 +16,10 @@ import (
        "net/http"
        "net/url"
        "os"
+       "os/exec"
        "path/filepath"
+       "strconv"
+       "strings"
 
        "git.arvados.org/arvados.git/lib/boot"
        "git.arvados.org/arvados.git/lib/config"
@@ -280,6 +284,80 @@ func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
        c.Check(coll.PortableDataHash, check.Equals, pdh)
 }
 
+func (s *IntegrationSuite) TestS3WithFederatedToken(c *check.C) {
+       if _, err := exec.LookPath("s3cmd"); err != nil {
+               c.Skip("s3cmd not in PATH")
+               return
+       }
+
+       testText := "IntegrationSuite.TestS3WithFederatedToken"
+
+       conn1 := s.conn("z1111")
+       rootctx1, _, _ := s.rootClients("z1111")
+       userctx1, ac1, _, _ := s.userClients(rootctx1, c, conn1, "z1111", true)
+       conn3 := s.conn("z3333")
+
+       createColl := func(clusterID string) arvados.Collection {
+               _, ac, kc := s.clientsWithToken(clusterID, ac1.AuthToken)
+               var coll arvados.Collection
+               fs, err := coll.FileSystem(ac, kc)
+               c.Assert(err, check.IsNil)
+               f, err := fs.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
+               c.Assert(err, check.IsNil)
+               _, err = io.WriteString(f, testText)
+               c.Assert(err, check.IsNil)
+               err = f.Close()
+               c.Assert(err, check.IsNil)
+               mtxt, err := fs.MarshalManifest(".")
+               c.Assert(err, check.IsNil)
+               coll, err = s.conn(clusterID).CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+                       "manifest_text": mtxt,
+               }})
+               c.Assert(err, check.IsNil)
+               return coll
+       }
+
+       for _, trial := range []struct {
+               clusterID string // create the collection on this cluster (then use z3333 to access it)
+               token     string
+       }{
+               // Try the hardest test first: z3333 hasn't seen
+               // z1111's token yet, and we're just passing the
+               // opaque secret part, so z3333 has to guess that it
+               // belongs to z1111.
+               {"z1111", strings.Split(ac1.AuthToken, "/")[2]},
+               {"z3333", strings.Split(ac1.AuthToken, "/")[2]},
+               {"z1111", strings.Replace(ac1.AuthToken, "/", "_", -1)},
+               {"z3333", strings.Replace(ac1.AuthToken, "/", "_", -1)},
+       } {
+               c.Logf("================ %v", trial)
+               coll := createColl(trial.clusterID)
+
+               cfgjson, err := conn3.ConfigGet(userctx1)
+               c.Assert(err, check.IsNil)
+               var cluster arvados.Cluster
+               err = json.Unmarshal(cfgjson, &cluster)
+               c.Assert(err, check.IsNil)
+
+               c.Logf("TokenV2 is %s", ac1.AuthToken)
+               host := cluster.Services.WebDAV.ExternalURL.Host
+               s3args := []string{
+                       "--ssl", "--no-check-certificate",
+                       "--host=" + host, "--host-bucket=" + host,
+                       "--access_key=" + trial.token, "--secret_key=" + trial.token,
+               }
+               buf, err := exec.Command("s3cmd", append(s3args, "ls", "s3://"+coll.UUID)...).CombinedOutput()
+               c.Check(err, check.IsNil)
+               c.Check(string(buf), check.Matches, `.* `+fmt.Sprintf("%d", len(testText))+` +s3://`+coll.UUID+`/test.txt\n`)
+
+               buf, err = exec.Command("s3cmd", append(s3args, "get", "s3://"+coll.UUID+"/test.txt", c.MkDir()+"/tmpfile")...).CombinedOutput()
+               // Command fails because we don't return Etag header.
+               // c.Check(err, check.IsNil)
+               flen := strconv.Itoa(len(testText))
+               c.Check(string(buf), check.Matches, `(?ms).*`+flen+` of `+flen+`.*`)
+       }
+}
+
 func (s *IntegrationSuite) TestGetCollectionAsAnonymous(c *check.C) {
        conn1 := s.conn("z1111")
        conn3 := s.conn("z3333")
index f43cc1ddee295d506854fc97447c0cfe46d868ab..cf4dbc47673e7713e8f9d77c2ebbb449077e4447 100644 (file)
@@ -24,7 +24,11 @@ func Test(t *testing.T) {
 
 var _ = check.Suite(&RPCSuite{})
 
-const contextKeyTestTokens = "testTokens"
+type key int
+
+const (
+       contextKeyTestTokens key = iota
+)
 
 type RPCSuite struct {
        log  logrus.FieldLogger
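Background on the change above: go vet warns when a built-in type such as string is used as a context key, because an identical key defined in another package would collide. Defining a small unexported type makes the key unique to the package. A minimal sketch of the pattern (names are illustrative):

<pre><code>package example

import "context"

// key is unexported, so no other package can construct a colliding
// context key of this type.
type key int

const contextKeyTestTokens key = iota

// withTokens stores a token list in a child context.
func withTokens(ctx context.Context, tokens []string) context.Context {
	return context.WithValue(ctx, contextKeyTestTokens, tokens)
}

// tokensFrom retrieves the list stored by withTokens, or nil.
func tokensFrom(ctx context.Context) []string {
	v, _ := ctx.Value(contextKeyTestTokens).([]string)
	return v
}
</code></pre>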
index da536107947187e3e88f1b59800a8d217666ca00..4bb249380fd98b2e340bc4bd3bacb2b78d5f0a47 100644 (file)
@@ -132,7 +132,7 @@ func kill(uuid string, signal syscall.Signal, stdout, stderr io.Writer) error {
        var pi procinfo
        err = json.NewDecoder(f).Decode(&pi)
        if err != nil {
-               return fmt.Errorf("decode %s: %s\n", path, err)
+               return fmt.Errorf("decode %s: %s", path, err)
        }
 
        if pi.UUID != uuid || pi.PID == 0 {
index c125b27a5f0783fe757bcf29ac0b62674b68df95..3a4f3a102b86d8adb1be71d41d85e8b1723f053e 100644 (file)
@@ -455,11 +455,11 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        }
        for bind := range runner.SecretMounts {
                if _, ok := runner.Container.Mounts[bind]; ok {
-                       return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+                       return fmt.Errorf("secret mount %q conflicts with regular mount", bind)
                }
                if runner.SecretMounts[bind].Kind != "json" &&
                        runner.SecretMounts[bind].Kind != "text" {
-                       return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+                       return fmt.Errorf("secret mount %q type is %q but only 'json' and 'text' are permitted",
                                bind, runner.SecretMounts[bind].Kind)
                }
                binds = append(binds, bind)
@@ -474,7 +474,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                if bind == "stdout" || bind == "stderr" {
                        // Is it a "file" mount kind?
                        if mnt.Kind != "file" {
-                               return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
+                               return fmt.Errorf("unsupported mount kind '%s' for %s: only 'file' is supported", mnt.Kind, bind)
                        }
 
                        // Does path start with OutputPath?
@@ -490,7 +490,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                if bind == "stdin" {
                        // Is it a "collection" mount kind?
                        if mnt.Kind != "collection" && mnt.Kind != "json" {
-                               return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
+                               return fmt.Errorf("unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported", mnt.Kind)
                        }
                }
 
@@ -500,7 +500,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 
                if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
                        if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
-                               return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
+                               return fmt.Errorf("only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
                        }
                }
 
@@ -508,17 +508,17 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                case mnt.Kind == "collection" && bind != "stdin":
                        var src string
                        if mnt.UUID != "" && mnt.PortableDataHash != "" {
-                               return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
+                               return fmt.Errorf("cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
                        }
                        if mnt.UUID != "" {
                                if mnt.Writable {
-                                       return fmt.Errorf("Writing to existing collections currently not permitted.")
+                                       return fmt.Errorf("writing to existing collections currently not permitted")
                                }
                                pdhOnly = false
                                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
                        } else if mnt.PortableDataHash != "" {
                                if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-                                       return fmt.Errorf("Can never write to a collection specified by portable data hash")
+                                       return fmt.Errorf("can never write to a collection specified by portable data hash")
                                }
                                idx := strings.Index(mnt.PortableDataHash, "/")
                                if idx > 0 {
@@ -559,15 +559,15 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        var tmpdir string
                        tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
                        if err != nil {
-                               return fmt.Errorf("While creating mount temp dir: %v", err)
+                               return fmt.Errorf("while creating mount temp dir: %v", err)
                        }
                        st, staterr := os.Stat(tmpdir)
                        if staterr != nil {
-                               return fmt.Errorf("While Stat on temp dir: %v", staterr)
+                               return fmt.Errorf("while Stat on temp dir: %v", staterr)
                        }
                        err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
                        if staterr != nil {
-                               return fmt.Errorf("While Chmod temp dir: %v", err)
+                               return fmt.Errorf("while Chmod temp dir: %v", err)
                        }
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
                        if bind == runner.Container.OutputPath {
@@ -618,7 +618,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        }
 
        if runner.HostOutputDir == "" {
-               return fmt.Errorf("Output path does not correspond to a writable mount point")
+               return fmt.Errorf("output path does not correspond to a writable mount point")
        }
 
        if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
@@ -640,20 +640,20 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 
        runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
        if err != nil {
-               return fmt.Errorf("While trying to start arv-mount: %v", err)
+               return fmt.Errorf("while trying to start arv-mount: %v", err)
        }
 
        for _, p := range collectionPaths {
                _, err = os.Stat(p)
                if err != nil {
-                       return fmt.Errorf("While checking that input files exist: %v", err)
+                       return fmt.Errorf("while checking that input files exist: %v", err)
                }
        }
 
        for _, cp := range copyFiles {
                st, err := os.Stat(cp.src)
                if err != nil {
-                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+                       return fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
                }
                if st.IsDir() {
                        err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
@@ -674,7 +674,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                        }
                                        return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
                                } else {
-                                       return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+                                       return fmt.Errorf("source %q is not a regular file or directory", cp.src)
                                }
                        })
                } else if st.Mode().IsRegular() {
@@ -684,7 +684,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        }
                }
                if err != nil {
-                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+                       return fmt.Errorf("while staging writable file from %q to %q: %v", cp.src, cp.bind, err)
                }
        }
 
index 55cc6ee564be66ed2ecfc0e7713ad0c150a03e9e..02ad1d0e22ef6c607be698ebb3e08caba53f9aee 100644 (file)
@@ -1506,7 +1506,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
                err := cr.SetupMounts()
                c.Check(err, NotNil)
-               c.Check(err, ErrorMatches, `Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)
+               c.Check(err, ErrorMatches, `only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1523,7 +1523,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
                err := cr.SetupMounts()
                c.Check(err, NotNil)
-               c.Check(err, ErrorMatches, `Unsupported mount kind 'tmp' for stdin.*`)
+               c.Check(err, ErrorMatches, `unsupported mount kind 'tmp' for stdin.*`)
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1654,7 +1654,7 @@ func (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) {
 }`, func(t *TestDockerClient) {})
 
        c.Check(err, NotNil)
-       c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'tmp' for stdout"), Equals, true)
+       c.Check(strings.Contains(err.Error(), "unsupported mount kind 'tmp' for stdout"), Equals, true)
 }
 
 func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
@@ -1665,7 +1665,7 @@ func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
 }`, func(t *TestDockerClient) {})
 
        c.Check(err, NotNil)
-       c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'collection' for stdout"), Equals, true)
+       c.Check(strings.Contains(err.Error(), "unsupported mount kind 'collection' for stdout"), Equals, true)
 }
 
 func (s *TestSuite) TestFullRunWithAPI(c *C) {
index 127be489df3a27e553f6aa421a6f1c40cdbdcc55..36d79d3d2ef89ac9819d12e3f4e2f175c96426bd 100644 (file)
@@ -12,6 +12,7 @@ import (
        "git.arvados.org/arvados.git/lib/controller/api"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/jmoiron/sqlx"
+       // sqlx needs lib/pq to talk to PostgreSQL
        _ "github.com/lib/pq"
 )
 
index 15ff0607a9a927e9659ac581d786e393b51f6c44..342ef03a7f8efb239e9594c9aea84873b69a15ea 100644 (file)
@@ -293,10 +293,10 @@ rm ${zip}
                        DataDirectory string
                        LogFile       string
                }
-               if pg_lsclusters, err2 := exec.Command("pg_lsclusters", "--no-header").CombinedOutput(); err2 != nil {
+               if pgLsclusters, err2 := exec.Command("pg_lsclusters", "--no-header").CombinedOutput(); err2 != nil {
                        err = fmt.Errorf("pg_lsclusters: %s", err2)
                        return 1
-               } else if pgclusters := strings.Split(strings.TrimSpace(string(pg_lsclusters)), "\n"); len(pgclusters) != 1 {
+               } else if pgclusters := strings.Split(strings.TrimSpace(string(pgLsclusters)), "\n"); len(pgclusters) != 1 {
                        logger.Warnf("pg_lsclusters returned %d postgresql clusters -- skipping postgresql initdb/startup, hope that's ok", len(pgclusters))
                } else if _, err = fmt.Sscanf(pgclusters[0], "%s %s %d %s %s %s %s", &pgc.Version, &pgc.Cluster, &pgc.Port, &pgc.Status, &pgc.Owner, &pgc.DataDirectory, &pgc.LogFile); err != nil {
                        err = fmt.Errorf("error parsing pg_lsclusters output: %s", err)
index 86a9085bda46fc69a517ba2be5faf7ea14688394..e92af24075f1b824c741f6f35b4de53e2346b7ed 100644 (file)
@@ -9,6 +9,7 @@ import (
        "io"
        "log"
        "net/http"
+       // pprof is only imported to register its HTTP handlers
        _ "net/http/pprof"
        "os"
 
index 935bec63b6d6a0b585c0eca38a22e20a42d304a7..0021bc8d906c5531b70c79a87d9be169658b5c57 100755 (executable)
@@ -141,9 +141,11 @@ else
   . /usr/src/arvados/build/run-library.sh
   TMPHERE=\$(pwd)
   cd /usr/src/arvados
+
+  # This defines python_sdk_version and cwl_runner_version with Python-style
+  # package suffixes (.dev/rc)
   calculate_python_sdk_cwl_package_versions
 
-  cwl_runner_version=\$(echo -n \$cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
   cd \$TMPHERE
   set -u
 
index 41ecfacc480f1df0a94dca4d11faefcc36541194..c20f61db26301be6be323d2097be5c55f3d17037 100644 (file)
@@ -10,6 +10,7 @@ import (
        "git.arvados.org/arvados.git/lib/ctrlctx"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/jmoiron/sqlx"
+       // sqlx needs lib/pq to talk to PostgreSQL
        _ "github.com/lib/pq"
        "gopkg.in/check.v1"
 )
index a9994f7047c79b19c98dd5f09d8184ef048686ed..9e8f9a4a0f5522940b4c346134aef9e61c79bff2 100644 (file)
@@ -13,8 +13,8 @@ import (
 
 func getStackTrace() string {
        buf := make([]byte, 1000)
-       bytes_written := runtime.Stack(buf, false)
-       return "Stack Trace:\n" + string(buf[:bytes_written])
+       bytesWritten := runtime.Stack(buf, false)
+       return "Stack Trace:\n" + string(buf[:bytesWritten])
 }
 
 func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
index 9295c14cc24a47cd38479e19a8aa57dc91c1c42a..0966e072eae6d354ad8664d935ce290fb35f7649 100644 (file)
@@ -29,36 +29,36 @@ type HashCheckingReader struct {
 // Read reads from the underlying reader, updates the hashing
 // function, and passes the results through. It returns BadChecksum
 // (instead of EOF) on the last read if the checksum doesn't match.
-func (this HashCheckingReader) Read(p []byte) (n int, err error) {
-       n, err = this.Reader.Read(p)
+func (hcr HashCheckingReader) Read(p []byte) (n int, err error) {
+       n, err = hcr.Reader.Read(p)
        if n > 0 {
-               this.Hash.Write(p[:n])
+               hcr.Hash.Write(p[:n])
        }
        if err == io.EOF {
-               sum := this.Hash.Sum(nil)
-               if fmt.Sprintf("%x", sum) != this.Check {
+               sum := hcr.Hash.Sum(nil)
+               if fmt.Sprintf("%x", sum) != hcr.Check {
                        err = BadChecksum
                }
        }
        return n, err
 }
 
-// WriteTo writes the entire contents of this.Reader to dest. Returns
+// WriteTo writes the entire contents of hcr.Reader to dest. Returns
 // BadChecksum if writing is successful but the checksum doesn't
 // match.
-func (this HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
-       if writeto, ok := this.Reader.(io.WriterTo); ok {
-               written, err = writeto.WriteTo(io.MultiWriter(dest, this.Hash))
+func (hcr HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
+       if writeto, ok := hcr.Reader.(io.WriterTo); ok {
+               written, err = writeto.WriteTo(io.MultiWriter(dest, hcr.Hash))
        } else {
-               written, err = io.Copy(io.MultiWriter(dest, this.Hash), this.Reader)
+               written, err = io.Copy(io.MultiWriter(dest, hcr.Hash), hcr.Reader)
        }
 
        if err != nil {
                return written, err
        }
 
-       sum := this.Hash.Sum(nil)
-       if fmt.Sprintf("%x", sum) != this.Check {
+       sum := hcr.Hash.Sum(nil)
+       if fmt.Sprintf("%x", sum) != hcr.Check {
                return written, BadChecksum
        }
 
@@ -68,10 +68,10 @@ func (this HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error
 // Close reads all remaining data from the underlying Reader and
 // returns BadChecksum if the checksum doesn't match. It also closes
 // the underlying Reader if it implements io.ReadCloser.
-func (this HashCheckingReader) Close() (err error) {
-       _, err = io.Copy(this.Hash, this.Reader)
+func (hcr HashCheckingReader) Close() (err error) {
+       _, err = io.Copy(hcr.Hash, hcr.Reader)
 
-       if closer, ok := this.Reader.(io.Closer); ok {
+       if closer, ok := hcr.Reader.(io.Closer); ok {
                closeErr := closer.Close()
                if err == nil {
                        err = closeErr
@@ -80,7 +80,7 @@ func (this HashCheckingReader) Close() (err error) {
        if err != nil {
                return err
        }
-       if fmt.Sprintf("%x", this.Hash.Sum(nil)) != this.Check {
+       if fmt.Sprintf("%x", hcr.Hash.Sum(nil)) != hcr.Check {
                return BadChecksum
        }
        return nil
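Renaming the receiver from this to hcr follows Go style (this/self receiver names are discouraged) without changing behavior. A minimal usage sketch of the exported type, feeding it local data instead of a Keep response:

    package main

    import (
        "crypto/md5"
        "fmt"
        "io/ioutil"
        "strings"

        "git.arvados.org/arvados.git/sdk/go/keepclient"
    )

    func main() {
        data := "hello\n"
        hcr := keepclient.HashCheckingReader{
            Reader: strings.NewReader(data),
            Hash:   md5.New(),
            Check:  fmt.Sprintf("%x", md5.Sum([]byte(data))),
        }
        // ReadAll drives Read; a mismatch surfaces as keepclient.BadChecksum.
        if _, err := ioutil.ReadAll(hcr); err != nil {
            fmt.Println("read failed:", err)
            return
        }
        fmt.Println("checksum verified")
    }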
index 59c4127240eee14a70680b96063d767ffe99ddce..57a89b50aa74362fcd4c5a229db2312b5870b249 100644 (file)
@@ -97,7 +97,7 @@ func (s *ServerRequiredSuite) TestDefaultReplications(c *C) {
 type StubPutHandler struct {
        c                  *C
        expectPath         string
-       expectApiToken     string
+       expectAPIToken     string
        expectBody         string
        expectStorageClass string
        handled            chan string
@@ -105,7 +105,7 @@ type StubPutHandler struct {
 
 func (sph StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        sph.c.Check(req.URL.Path, Equals, "/"+sph.expectPath)
-       sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sph.expectApiToken))
+       sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sph.expectAPIToken))
        sph.c.Check(req.Header.Get("X-Keep-Storage-Classes"), Equals, sph.expectStorageClass)
        body, err := ioutil.ReadAll(req.Body)
        sph.c.Check(err, Equals, nil)
@@ -256,7 +256,7 @@ type KeepServer struct {
 func RunSomeFakeKeepServers(st http.Handler, n int) (ks []KeepServer) {
        ks = make([]KeepServer, n)
 
-       for i := 0; i < n; i += 1 {
+       for i := 0; i < n; i++ {
                ks[i] = RunFakeKeepServer(st)
        }
 
@@ -464,14 +464,14 @@ func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
 type StubGetHandler struct {
        c              *C
        expectPath     string
-       expectApiToken string
+       expectAPIToken string
        httpStatus     int
        body           []byte
 }
 
 func (sgh StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        sgh.c.Check(req.URL.Path, Equals, "/"+sgh.expectPath)
-       sgh.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sgh.expectApiToken))
+       sgh.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sgh.expectAPIToken))
        resp.WriteHeader(sgh.httpStatus)
        resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(sgh.body)))
        resp.Write(sgh.body)
index 91117f2d3216ea317b05761b155a42e9e00f2da6..3b1afe1e288cdec5746cc69f167d10d89b57361b 100644 (file)
@@ -55,7 +55,7 @@ type uploadStatus struct {
        response       string
 }
 
-func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
+func (kc *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
        uploadStatusChan chan<- uploadStatus, expectedLength int64, reqid string) {
 
        var req *http.Request
@@ -77,15 +77,15 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        }
 
        req.Header.Add("X-Request-Id", reqid)
-       req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
+       req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
        req.Header.Add("Content-Type", "application/octet-stream")
-       req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(this.Want_replicas))
-       if len(this.StorageClasses) > 0 {
-               req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
+       req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(kc.Want_replicas))
+       if len(kc.StorageClasses) > 0 {
+               req.Header.Add("X-Keep-Storage-Classes", strings.Join(kc.StorageClasses, ", "))
        }
 
        var resp *http.Response
-       if resp, err = this.httpClient().Do(req); err != nil {
+       if resp, err = kc.httpClient().Do(req); err != nil {
                DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
                uploadStatusChan <- uploadStatus{err, url, 0, 0, err.Error()}
                return
@@ -116,15 +116,15 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        }
 }
 
-func (this *KeepClient) putReplicas(
+func (kc *KeepClient) putReplicas(
        hash string,
        getReader func() io.Reader,
        expectedLength int64) (locator string, replicas int, err error) {
 
-       reqid := this.getRequestID()
+       reqid := kc.getRequestID()
 
        // Calculate the ordering for uploading to servers
-       sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
+       sv := NewRootSorter(kc.WritableLocalRoots(), hash).GetSortedRoots()
 
        // The next server to try contacting
        nextServer := 0
@@ -147,15 +147,15 @@ func (this *KeepClient) putReplicas(
        }()
 
        replicasDone := 0
-       replicasTodo := this.Want_replicas
+       replicasTodo := kc.Want_replicas
 
-       replicasPerThread := this.replicasPerService
+       replicasPerThread := kc.replicasPerService
        if replicasPerThread < 1 {
                // unlimited or unknown
                replicasPerThread = replicasTodo
        }
 
-       retriesRemaining := 1 + this.Retries
+       retriesRemaining := 1 + kc.Retries
        var retryServers []string
 
        lastError := make(map[string]string)
@@ -169,7 +169,7 @@ func (this *KeepClient) putReplicas(
                                // Start some upload requests
                                if nextServer < len(sv) {
                                        DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[nextServer])
-                                       go this.uploadToKeepServer(sv[nextServer], hash, getReader(), uploadStatusChan, expectedLength, reqid)
+                                       go kc.uploadToKeepServer(sv[nextServer], hash, getReader(), uploadStatusChan, expectedLength, reqid)
                                        nextServer++
                                        active++
                                } else {
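putReplicas fans uploads out across the sorted server list until Want_replicas copies have succeeded, allowing 1 + Retries rounds of attempts. A sketch of how a caller tunes those knobs before writing a block (assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the environment):

    package main

    import (
        "fmt"

        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
    )

    func main() {
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
            panic(err)
        }
        kc, err := keepclient.MakeKeepClient(arv)
        if err != nil {
            panic(err)
        }
        kc.Want_replicas = 2                    // replicasTodo above
        kc.Retries = 3                          // retriesRemaining = 1 + Retries
        kc.StorageClasses = []string{"default"} // sent as X-Keep-Storage-Classes
        locator, replicas, err := kc.PutB([]byte("hello\n"))
        fmt.Println(locator, replicas, err)
    }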
index 74a4c1efa571b5229825243c98034957e99d4e45..6b308a231cb7ede8cf50b949da75a861a46219d3 100644 (file)
@@ -130,6 +130,7 @@ class ApiClientAuthorization < ArvadosModel
 
     token_uuid = ''
     secret = token
+    stored_secret = nil         # ...if different from secret
     optional = nil
 
     case token[0..2]
@@ -206,8 +207,7 @@ class ApiClientAuthorization < ArvadosModel
         # below. If so, we'll stuff the database with hmac instead of
         # the real OIDC token.
         upstream_cluster_id = Rails.configuration.Login.LoginCluster
-        token_uuid = upstream_cluster_id + generate_uuid[5..27]
-        secret = hmac
+        stored_secret = hmac
       else
         return nil
       end
@@ -246,6 +246,23 @@ class ApiClientAuthorization < ArvadosModel
 
     remote_user_prefix = remote_user['uuid'][0..4]
 
+    if token_uuid == ''
+      # Use the same UUID as the remote when caching the token.
+      begin
+        remote_token = SafeJSON.load(
+          clnt.get_content('https://' + host + '/arvados/v1/api_client_authorizations/current',
+                           {'remote' => Rails.configuration.ClusterID},
+                           {'Authorization' => 'Bearer ' + token}))
+        token_uuid = remote_token['uuid']
+        if !token_uuid.match(HasUuid::UUID_REGEX) || token_uuid[0..4] != upstream_cluster_id
+          raise "remote cluster #{upstream_cluster_id} returned invalid token uuid #{token_uuid.inspect}"
+        end
+      rescue => e
+        Rails.logger.warn "error getting remote token details for #{token.inspect}: #{e}"
+        return nil
+      end
+    end
+
     # Clusters can only authenticate for their own users.
     if remote_user_prefix != upstream_cluster_id
       Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}"
@@ -328,11 +345,18 @@ class ApiClientAuthorization < ArvadosModel
         auth.user = user
         auth.api_client_id = 0
       end
+      # If stored_secret is set, we save stored_secret in the database
+      # but return the real secret to the caller. This way, if we end
+      # up returning the auth record to the client, they see the same
+      # secret they supplied, instead of the HMAC we saved in the
+      # database.
+      stored_secret = stored_secret || secret
       auth.update_attributes!(user: user,
-                              api_token: secret,
+                              api_token: stored_secret,
                               api_client_id: 0,
                               expires_at: Time.now + Rails.configuration.Login.RemoteTokenRefresh)
-      Rails.logger.debug "cached remote token #{token_uuid} with secret #{secret} in local db"
+      Rails.logger.debug "cached remote token #{token_uuid} with secret #{stored_secret} in local db"
+      auth.api_token = secret
       return auth
     end
 
index 4115482d809974648e9cf99ea2be7800a829b45f..a5899ce8a7cc0809a57b64a9588d8e227846c274 100644 (file)
@@ -202,7 +202,7 @@ var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`
 // Cancelled or Complete. See https://dev.arvados.org/issues/10979
 func (disp *Dispatcher) checkSqueueForOrphans() {
        for _, uuid := range disp.sqCheck.All() {
-               if !containerUuidPattern.MatchString(uuid) {
+               if !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {
                        continue
                }
                err := disp.TrackContainer(uuid)
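The added ClusterID prefix check keeps a dispatcher from adopting squeue entries that belong to another cluster sharing the same SLURM installation. Restated as a standalone predicate (shouldTrack is a hypothetical helper, not dispatcher code):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)

    // shouldTrack mirrors the loop's filter: track only well-formed
    // container UUIDs that were issued by this cluster.
    func shouldTrack(uuid, clusterID string) bool {
        return containerUuidPattern.MatchString(uuid) && strings.HasPrefix(uuid, clusterID)
    }

    func main() {
        fmt.Println(shouldTrack("xxxxx-dz642-012345678901234", "xxxxx")) // true
        fmt.Println(shouldTrack("yyyyy-dz642-012345678901234", "xxxxx")) // false: foreign cluster
    }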
index 37fee37604ccd0280b93d142587852382d4e6fd1..7fb90789a55b164bd2465bf8d2454cf2d30e4f52 100644 (file)
@@ -16,6 +16,7 @@ import (
        "net/url"
        "os"
        "path/filepath"
+       "regexp"
        "sort"
        "strconv"
        "strings"
@@ -111,6 +112,21 @@ func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string,
        return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
 }
 
+var v2tokenUnderscore = regexp.MustCompile(`^v2_[a-z0-9]{5}-gj3su-[a-z0-9]{15}_`)
+
+func unescapeKey(key string) string {
+       if v2tokenUnderscore.MatchString(key) {
+               // Entire Arvados token, with "/" replaced by "_" to
+               // avoid colliding with the Authorization header
+               // format.
+               return strings.Replace(key, "_", "/", -1)
+       } else if s, err := url.PathUnescape(key); err == nil {
+               return s
+       } else {
+               return key
+       }
+}
+
 // checks3signature verifies the given S3 V4 signature and returns the
 // Arvados token that corresponds to the given accessKey. An error is
 // returned if accessKey is not a valid token UUID or the signature
@@ -152,7 +168,7 @@ func (h *handler) checks3signature(r *http.Request) (string, error) {
        } else {
                // Access key and secret key are both an entire
                // Arvados token or OIDC access token.
-               ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+key)
+               ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+unescapeKey(key))
                err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
                secret = key
        }
@@ -170,7 +186,7 @@ func (h *handler) checks3signature(r *http.Request) (string, error) {
        } else if expect != signature {
                return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
        }
-       return secret, nil
+       return aca.TokenV2(), nil
 }
 
 func s3ErrorResponse(w http.ResponseWriter, s3code string, message string, resource string, code int) {
@@ -210,7 +226,7 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
                        s3ErrorResponse(w, InvalidRequest, "malformed Authorization header", r.URL.Path, http.StatusUnauthorized)
                        return true
                }
-               token = split[0]
+               token = unescapeKey(split[0])
        } else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
                t, err := h.checks3signature(r)
                if err != nil {
@@ -232,7 +248,15 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
        fs := client.SiteFileSystem(kc)
        fs.ForwardSlashNameSubstitution(h.Config.cluster.Collections.ForwardSlashNameSubstitution)
 
-       objectNameGiven := strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
+       var objectNameGiven bool
+       fspath := "/by_id"
+       if id := parseCollectionIDFromDNSName(r.Host); id != "" {
+               fspath += "/" + id
+               objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 0
+       } else {
+               objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
+       }
+       fspath += r.URL.Path
 
        switch {
        case r.Method == http.MethodGet && !objectNameGiven:
@@ -248,7 +272,6 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
                }
                return true
        case r.Method == http.MethodGet || r.Method == http.MethodHead:
-               fspath := "/by_id" + r.URL.Path
                fi, err := fs.Stat(fspath)
                if r.Method == "HEAD" && !objectNameGiven {
                        // HeadBucket
@@ -282,7 +305,6 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
                        s3ErrorResponse(w, InvalidArgument, "Missing object name in PUT request.", r.URL.Path, http.StatusBadRequest)
                        return true
                }
-               fspath := "by_id" + r.URL.Path
                var objectIsDir bool
                if strings.HasSuffix(fspath, "/") {
                        if !h.Config.cluster.Collections.S3FolderObjects {
@@ -377,7 +399,6 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
                        s3ErrorResponse(w, InvalidArgument, "missing object name in DELETE request", r.URL.Path, http.StatusBadRequest)
                        return true
                }
-               fspath := "by_id" + r.URL.Path
                if strings.HasSuffix(fspath, "/") {
                        fspath = strings.TrimSuffix(fspath, "/")
                        fi, err := fs.Stat(fspath)
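Because S3 credentials cannot contain "/", keep-web now accepts an entire v2 Arvados token with the slashes either URL-escaped or replaced by "_", and unescapeKey reverses whichever encoding was used. Restated as a standalone program (the token value is fake):

    package main

    import (
        "fmt"
        "net/url"
        "regexp"
        "strings"
    )

    var v2tokenUnderscore = regexp.MustCompile(`^v2_[a-z0-9]{5}-gj3su-[a-z0-9]{15}_`)

    // unescapeKey mirrors the keep-web logic above.
    func unescapeKey(key string) string {
        if v2tokenUnderscore.MatchString(key) {
            // Entire token with "/" replaced by "_".
            return strings.Replace(key, "_", "/", -1)
        } else if s, err := url.PathUnescape(key); err == nil {
            return s
        }
        return key
    }

    func main() {
        fmt.Println(unescapeKey("v2_zzzzz-gj3su-0123456789abcde_fakesecretvalue"))
        // Output: v2/zzzzz-gj3su-0123456789abcde/fakesecretvalue
    }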
index fb759e901cac096a3d0a61ab753af19545706729..b9a6d85ecb1a6bacc75b2a9b94b3ebc54c851f7e 100644 (file)
@@ -10,6 +10,7 @@ import (
        "fmt"
        "io/ioutil"
        "net/http"
+       "net/url"
        "os"
        "os/exec"
        "strings"
@@ -118,11 +119,15 @@ func (s *IntegrationSuite) TestS3Signatures(c *check.C) {
                secretkey string
        }{
                {true, aws.V2Signature, arvadostest.ActiveToken, "none"},
+               {true, aws.V2Signature, url.QueryEscape(arvadostest.ActiveTokenV2), "none"},
+               {true, aws.V2Signature, strings.Replace(arvadostest.ActiveTokenV2, "/", "_", -1), "none"},
                {false, aws.V2Signature, "none", "none"},
                {false, aws.V2Signature, "none", arvadostest.ActiveToken},
 
                {true, aws.V4Signature, arvadostest.ActiveTokenUUID, arvadostest.ActiveToken},
                {true, aws.V4Signature, arvadostest.ActiveToken, arvadostest.ActiveToken},
+               {true, aws.V4Signature, url.QueryEscape(arvadostest.ActiveTokenV2), url.QueryEscape(arvadostest.ActiveTokenV2)},
+               {true, aws.V4Signature, strings.Replace(arvadostest.ActiveTokenV2, "/", "_", -1), strings.Replace(arvadostest.ActiveTokenV2, "/", "_", -1)},
                {false, aws.V4Signature, arvadostest.ActiveToken, ""},
                {false, aws.V4Signature, arvadostest.ActiveToken, "none"},
                {false, aws.V4Signature, "none", arvadostest.ActiveToken},
@@ -712,3 +717,12 @@ func (s *IntegrationSuite) TestS3cmd(c *check.C) {
        c.Check(err, check.IsNil)
        c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
 }
+
+func (s *IntegrationSuite) TestS3BucketInHost(c *check.C) {
+       stage := s.s3setup(c)
+       defer stage.teardown(c)
+
+       hdr, body, _ := s.runCurl(c, "AWS "+arvadostest.ActiveTokenV2+":none", stage.coll.UUID+".collections.example.com", "/sailboat.txt")
+       c.Check(hdr, check.Matches, `(?s)HTTP/1.1 200 OK\r\n.*`)
+       c.Check(body, check.Equals, "⛵\n")
+}
index b15c33ea74b4f1593f621614bd159694f9571957..0a1c7d1b3a89a8338428cf25597ee27050633ccd 100644 (file)
@@ -257,12 +257,16 @@ func (s *IntegrationSuite) Test200(c *check.C) {
 }
 
 // Return header block and body.
-func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...string) (hdr, bodyPart string, bodySize int64) {
+func (s *IntegrationSuite) runCurl(c *check.C, auth, host, uri string, args ...string) (hdr, bodyPart string, bodySize int64) {
        curlArgs := []string{"--silent", "--show-error", "--include"}
        testHost, testPort, _ := net.SplitHostPort(s.testServer.Addr)
        curlArgs = append(curlArgs, "--resolve", host+":"+testPort+":"+testHost)
-       if token != "" {
-               curlArgs = append(curlArgs, "-H", "Authorization: OAuth2 "+token)
+       if strings.Contains(auth, " ") {
+               // caller supplied entire Authorization header value
+               curlArgs = append(curlArgs, "-H", "Authorization: "+auth)
+       } else if auth != "" {
+               // caller supplied Arvados token
+               curlArgs = append(curlArgs, "-H", "Authorization: Bearer "+auth)
        }
        curlArgs = append(curlArgs, args...)
        curlArgs = append(curlArgs, "http://"+host+":"+testPort+uri)
index 93bb77d4fac014ab04179826f11ec26ba1f0f1eb..ed3466ddebd81182540b234d63bbd221efd4b8e9 100644 (file)
@@ -13,7 +13,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 
   config.vm.define "arvados" do |arv|
     arv.vm.box = "bento/debian-10"
-    arv.vm.hostname = "arva2.arv.local"
+    arv.vm.hostname = "vagrant.local"
+    # CPU/RAM
+    config.vm.provider :virtualbox do |v|
+      v.memory = 2048
+      v.cpus = 2
+    end
+
     # Networking
     arv.vm.network "forwarded_port", guest: 8443, host: 8443
     arv.vm.network "forwarded_port", guest: 25100, host: 25100
@@ -24,12 +30,10 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     arv.vm.network "forwarded_port", guest: 8001, host: 8001
     arv.vm.network "forwarded_port", guest: 8000, host: 8000
     arv.vm.network "forwarded_port", guest: 3001, host: 3001
-    # config.vm.network "private_network", ip: "192.168.33.10"
-    # arv.vm.synced_folder "salt_pillars", "/srv/pillars",
-    #                      create: true
     arv.vm.provision "shell",
                      path: "provision.sh",
                      args: [
+                       "--test",
                        "--vagrant",
                        "--ssl-port=8443"
                      ].join(" ")
index 7e88d7662e5b34fd007f11a11372f4b1e3f60de7..a207d019875a7b43bc2dfed1e116f4a9410ab1a5 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash -x
+#!/bin/bash
 
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
@@ -50,29 +50,42 @@ VERSION="latest"
 
 set -o pipefail
 
+# capture the directory that the script is running from
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
 usage() {
   echo >&2
-  echo >&2 "Usage: $0 [-h] [-h]"
+  echo >&2 "Usage: ${0} [-h] [-h]"
   echo >&2
-  echo >&2 "$0 options:"
-  echo >&2 "  -v, --vagrant           Run in vagrant and use the /vagrant shared dir"
+  echo >&2 "${0} options:"
+  echo >&2 "  -d, --debug             Run salt installation in debug mode"
   echo >&2 "  -p <N>, --ssl-port <N>  SSL port to use for the web applications"
+  echo >&2 "  -t, --test              Test installation running a CWL workflow"
   echo >&2 "  -h, --help              Display this help and exit"
+  echo >&2 "  -v, --vagrant           Run in vagrant and use the /vagrant shared dir"
   echo >&2
 }
 
 arguments() {
   # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-  TEMP=`getopt -o hvp: \
-    --long help,vagrant,ssl-port: \
-    -n "$0" -- "$@"`
+  TEMP=$(getopt -o dhp:tv \
+    --long debug,help,ssl-port:,test,vagrant \
+    -n "${0}" -- "${@}")
 
-  if [ $? != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
+  if [ ${?} != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
   # Note the quotes around `$TEMP': they are essential!
   eval set -- "$TEMP"
 
-  while [ $# -ge 1 ]; do
-    case $1 in
+  while [ ${#} -ge 1 ]; do
+    case ${1} in
+      -d | --debug)
+        LOG_LEVEL="debug"
+        shift
+        ;;
+      -t | --test)
+        TEST="yes"
+        shift
+        ;;
       -v | --vagrant)
         VAGRANT="yes"
         shift
@@ -93,9 +106,11 @@ arguments() {
   done
 }
 
+LOG_LEVEL="info"
 HOST_SSL_PORT=443
+TESTS_DIR="tests"
 
-arguments $@
+arguments ${@}
 
 # Salt's dir
 ## states
@@ -106,7 +121,7 @@ F_DIR="/srv/formulas"
 P_DIR="/srv/pillars"
 
 apt-get update
-apt-get install -y curl git
+apt-get install -y curl git jq
 
 dpkg -l |grep salt-minion
 if [ ${?} -eq 0 ]; then
@@ -139,6 +154,7 @@ mkdir -p ${P_DIR}
 cat > ${S_DIR}/top.sls << EOFTSLS
 base:
   '*':
+    - example_single_host_host_entries
     - example_add_snakeoil_certs
     - locale
     - nginx.passenger
@@ -152,6 +168,7 @@ cat > ${P_DIR}/top.sls << EOFPSLS
 base:
   '*':
     - arvados
+    - docker
     - locale
     - nginx_api_configuration
     - nginx_controller_configuration
@@ -173,25 +190,23 @@ for f in postgres arvados nginx docker locale; do
 done
 
 if [ "x${BRANCH}" != "x" ]; then
-  cd ${F_DIR}/arvados-formula
-  git checkout -t origin/${BRANCH}
+  cd ${F_DIR}/arvados-formula || exit 1
+  git checkout -t origin/"${BRANCH}"
   cd -
 fi
 
-# sed "s/__DOMAIN__/${DOMAIN}/g; s/__CLUSTER__/${CLUSTER}/g; s/__RELEASE__/${RELEASE}/g; s/__VERSION__/${VERSION}/g" \
-#   ${CONFIG_DIR}/arvados_dev.sls > ${P_DIR}/arvados.sls
-
 if [ "x${VAGRANT}" = "xyes" ]; then
   SOURCE_PILLARS_DIR="/vagrant/${CONFIG_DIR}"
+  TESTS_DIR="/vagrant/${TESTS_DIR}"
 else
-  SOURCE_PILLARS_DIR="./${CONFIG_DIR}"
+  SOURCE_PILLARS_DIR="${SCRIPT_DIR}/${CONFIG_DIR}"
+  TESTS_DIR="${SCRIPT_DIR}/${TESTS_DIR}"
 fi
 
-# Replace cluster and domain name in the example pillars
-for f in ${SOURCE_PILLARS_DIR}/*; do
-  # sed "s/example.net/${DOMAIN}/g; s/fixme/${CLUSTER}/g" \
-  sed "s/__DOMAIN__/${DOMAIN}/g;
-       s/__CLUSTER__/${CLUSTER}/g;
+# Replace cluster and domain name in the example pillars and test files
+for f in "${SOURCE_PILLARS_DIR}"/*; do
+  sed "s/__CLUSTER__/${CLUSTER}/g;
+       s/__DOMAIN__/${DOMAIN}/g;
        s/__RELEASE__/${RELEASE}/g;
        s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
        s/__GUEST_SSL_PORT__/${GUEST_SSL_PORT}/g;
@@ -199,12 +214,21 @@ for f in ${SOURCE_PILLARS_DIR}/*; do
        s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
        s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g;
        s/__VERSION__/${VERSION}/g" \
-  ${f} > ${P_DIR}/$(basename ${f})
+  "${f}" > "${P_DIR}"/$(basename "${f}")
 done
 
-# Let's write an /etc/hosts file that points all the hosts to localhost
-
-echo "127.0.0.2 api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+mkdir -p /tmp/cluster_tests
+# Replace cluster and domain name in the example pillars and test files
+for f in "${TESTS_DIR}"/*; do
+  sed "s/__CLUSTER__/${CLUSTER}/g;
+       s/__DOMAIN__/${DOMAIN}/g;
+       s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
+       s/__INITIAL_USER__/${INITIAL_USER}/g;
+       s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
+       s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g" \
+  ${f} > /tmp/cluster_tests/$(basename ${f})
+done
+chmod 755 /tmp/cluster_tests/run-test.sh
 
 # FIXME! #16992 Temporary fix for psql call in arvados-api-server
 if [ -e /root/.psqlrc ]; then
@@ -220,7 +244,7 @@ echo '\pset pager off' >> /root/.psqlrc
 # END FIXME! #16992 Temporary fix for psql call in arvados-api-server
 
 # Now run the install
-salt-call --local state.apply -l debug
+salt-call --local state.apply -l ${LOG_LEVEL}
 
 # FIXME! #16992 Temporary fix for psql call in arvados-api-server
 if [ "x${DELETE_PSQL}" = "xyes" ]; then
@@ -229,7 +253,18 @@ if [ "x${DELETE_PSQL}" = "xyes" ]; then
 fi
 
 if [ "x${RESTORE_PSQL}" = "xyes" ]; then
-  echo "Restroting .psql file"
+  echo "Restoring .psql file"
   mv -v /root/.psqlrc.provision.backup /root/.psqlrc
 fi
 # END FIXME! #16992 Temporary fix for psql call in arvados-api-server
+
+# If running in a vagrant VM, add default user to docker group
+if [ "x${VAGRANT}" = "xyes" ]; then
+  usermod -a -G docker vagrant
+fi
+
+# Test that the installation finished correctly
+if [ "x${TEST}" = "xyes" ]; then
+  cd /tmp/cluster_tests
+  ./run-test.sh
+fi
index ad0cbab700bc29aacc58d28310393a6b60671021..dffd6575e02dc768daa7696e4a4eb94ee2146036 100644 (file)
@@ -78,19 +78,19 @@ arvados:
 
     ### TOKENS
     tokens:
-      system_root: changeme_system_root_token
-      management: changeme_management_token
-      rails_secret: changeme_rails_secret_token
-      anonymous_user: changeme_anonymous_user_token
+      system_root: changemesystemroottoken
+      management: changememanagementtoken
+      rails_secret: changemerailssecrettoken
+      anonymous_user: changemeanonymoususertoken
 
     ### KEYS
     secrets:
-      blob_signing_key: changeme_blob_signing_key
-      workbench_secret_key: changeme_workbench_secret_key
-      dispatcher_access_key: changeme_dispatcher_access_key
-      dispatcher_secret_key: changeme_dispatcher_secret_key
-      keep_access_key: changeme_keep_access_key
-      keep_secret_key: changeme_keep_secret_key
+      blob_signing_key: changemeblobsigningkey
+      workbench_secret_key: changemeworkbenchsecretkey
+      dispatcher_access_key: changemedispatcheraccesskey
+      dispatcher_secret_key: changemedispatchersecretkey
+      keep_access_key: changemekeepaccesskey
+      keep_secret_key: changemekeepsecretkey
 
     Login:
       Test:
@@ -124,7 +124,7 @@ arvados:
       Controller:
         ExternalURL: https://__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
         InternalURLs:
-          http://127.0.0.2:8003: {}
+          http://controller.internal:8003: {}
       DispatchCloud:
         InternalURLs:
           http://__CLUSTER__.__DOMAIN__:9006: {}
@@ -134,17 +134,17 @@ arvados:
       Keepproxy:
         ExternalURL: https://keep.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
         InternalURLs:
-          http://127.0.0.2:25100: {}
+          http://keep.internal:25100: {}
       Keepstore:
         InternalURLs:
           http://keep0.__CLUSTER__.__DOMAIN__:25107: {}
       RailsAPI:
         InternalURLs:
-          http://127.0.0.2:8004: {}
+          http://api.internal:8004: {}
       WebDAV:
         ExternalURL: https://collections.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
         InternalURLs:
-          http://127.0.0.2:9002: {}
+          http://collections.internal:9002: {}
       WebDAVDownload:
         ExternalURL: https://download.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
       WebShell:
@@ -152,7 +152,7 @@ arvados:
       Websocket:
         ExternalURL: wss://ws.__CLUSTER__.__DOMAIN__/websocket
         InternalURLs:
-          http://127.0.0.2:8005: {}
+          http://ws.internal:8005: {}
       Workbench1:
         ExternalURL: https://workbench.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
       Workbench2:
diff --git a/tools/salt-install/single_host/docker.sls b/tools/salt-install/single_host/docker.sls
new file mode 100644 (file)
index 0000000..54d2256
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+docker:
+  pkg:
+    docker:
+      use_upstream: package
index db0bea126e2510d056780da327b09593c2f3df26..b2f12c77399bdd9df8c48f7d3ac9f9004670f1aa 100644 (file)
@@ -18,7 +18,7 @@ nginx:
         overwrite: true
         config:
           - server:
-            - listen: '127.0.0.2:8004'
+            - listen: 'api.internal:8004'
             - server_name: api
             - root: /var/www/arvados-api/current/public
             - index:  index.html index.htm
index 2b2e7d5915f88b793735c519d6f23c49596bc0c7..7c99d2dea7538e042321b1f78b1811cb8224723b 100644 (file)
@@ -14,7 +14,7 @@ nginx:
           default: 1
           '127.0.0.0/8': 0
         upstream controller_upstream:
-          - server: '127.0.0.2:8003  fail_timeout=10s'
+          - server: 'controller.internal:8003  fail_timeout=10s'
 
   ### SITES
   servers:
index 29cd0cb4401e73fa62ead9f614dceafb2f4205c1..fc4854e5a8d35fad8cabb628f3383a5a4f65dc83 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream keepproxy_upstream:
-          - server: '127.0.0.2:25100 fail_timeout=10s'
+          - server: 'keep.internal:25100 fail_timeout=10s'
 
   servers:
     managed:
index bd0a636b0ecb526569b7d807d5608dc7de514af1..513c0393e0e98b2f9f48634527d083c1e415b1a3 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream collections_downloads_upstream:
-          - server: '127.0.0.2:9002 fail_timeout=10s'
+          - server: 'collections.internal:9002 fail_timeout=10s'
 
   servers:
     managed:
index e33ddcea707e8bffc09269bf73d1d9790cdaab5b..495de82d235e2bebb511b56e559110f16b4573c5 100644 (file)
@@ -12,7 +12,7 @@ nginx:
       ### STREAMS
       http:
         upstream webshell_upstream:
-          - server: '127.0.0.2:4200 fail_timeout=10s'
+          - server: 'shell.internal:4200 fail_timeout=10s'
 
   ### SITES
   servers:
index 2241d3b8eaf5e8daea8c95cb723e0c7f90245b2c..1848a8737ea0fb8e10ff9c884ac87111b2a17b42 100644 (file)
@@ -11,7 +11,7 @@ nginx:
       ### STREAMS
       http:
         upstream websocket_upstream:
-          - server: '127.0.0.2:8005 fail_timeout=10s'
+          - server: 'ws.internal:8005 fail_timeout=10s'
 
   servers:
     managed:
index 76fb134385469beba6bec89b857b3be93da43e68..9a382e777cc4dbf81f2834b63816c495977c64f2 100644 (file)
@@ -17,7 +17,7 @@ nginx:
       ### STREAMS
       http:
         upstream workbench_upstream:
-          - server: '127.0.0.2:9000 fail_timeout=10s'
+          - server: 'workbench.internal:9000 fail_timeout=10s'
 
   ### SITES
   servers:
@@ -64,7 +64,7 @@ nginx:
         overwrite: true
         config:
           - server:
-            - listen: '127.0.0.2:9000'
+            - listen: 'workbench.internal:9000'
             - server_name: workbench
             - root: /var/www/arvados-workbench/current/public
             - index:  index.html index.htm
diff --git a/tools/salt-install/tests/hasher-workflow-job.yml b/tools/salt-install/tests/hasher-workflow-job.yml
new file mode 100644 (file)
index 0000000..8e5f611
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+inputfile:
+  class: File
+  path: test.txt
+hasher1_outputname: hasher1.md5sum.txt
+hasher2_outputname: hasher2.md5sum.txt
+hasher3_outputname: hasher3.md5sum.txt
diff --git a/tools/salt-install/tests/hasher-workflow.cwl b/tools/salt-install/tests/hasher-workflow.cwl
new file mode 100644 (file)
index 0000000..a23a22f
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+
+inputs:
+  inputfile: File
+  hasher1_outputname: string
+  hasher2_outputname: string
+  hasher3_outputname: string
+
+outputs:
+  hasher_out:
+    type: File
+    outputSource: hasher3/hasher_out
+
+steps:
+  hasher1:
+    run: hasher.cwl
+    in:
+      inputfile: inputfile
+      outputname: hasher1_outputname
+    out: [hasher_out]
+    hints:
+      ResourceRequirement:
+        coresMin: 1
+      arv:IntermediateOutput:
+        outputTTL: 3600
+      arv:ReuseRequirement:
+        enableReuse: false
+
+  hasher2:
+    run: hasher.cwl
+    in:
+      inputfile: hasher1/hasher_out
+      outputname: hasher2_outputname
+    out: [hasher_out]
+    hints:
+      ResourceRequirement:
+        coresMin: 1
+      arv:IntermediateOutput:
+        outputTTL: 3600
+      arv:ReuseRequirement:
+        enableReuse: false
+
+  hasher3:
+    run: hasher.cwl
+    in:
+      inputfile: hasher2/hasher_out
+      outputname: hasher3_outputname
+    out: [hasher_out]
+    hints:
+      ResourceRequirement:
+        coresMin: 1
+      arv:IntermediateOutput:
+        outputTTL: 3600
+      arv:ReuseRequirement:
+        enableReuse: false
diff --git a/tools/salt-install/tests/hasher.cwl b/tools/salt-install/tests/hasher.cwl
new file mode 100644 (file)
index 0000000..0a0f64f
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+
+baseCommand: md5sum
+inputs:
+  inputfile:
+    type: File
+    inputBinding:
+      position: 1
+  outputname:
+    type: string
+
+stdout: $(inputs.outputname)
+
+outputs:
+  hasher_out:
+    type: File
+    outputBinding:
+      glob: $(inputs.outputname)
diff --git a/tools/salt-install/tests/run-test.sh b/tools/salt-install/tests/run-test.sh
new file mode 100755 (executable)
index 0000000..cf61d92
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+export ARVADOS_API_TOKEN=changemesystemroottoken
+export ARVADOS_API_HOST=__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+export ARVADOS_API_HOST_INSECURE=true
+
+
+# https://doc.arvados.org/v2.0/install/install-jobs-image.html
+echo "Creating Arvados Standard Docker Images project"
+uuid_prefix=$(arv --format=uuid user current | cut -d- -f1)
+project_uuid=$(arv --format=uuid group list --filters '[["name", "=", "Arvados Standard Docker Images"]]')
+
+if [ "x${project_uuid}" = "x" ]; then
+  project_uuid=$(arv --format=uuid group create --group "{\"owner_uuid\": \"${uuid_prefix}-tpzed-000000000000000\", \"group_class\":\"project\", \"name\":\"Arvados Standard Docker Images\"}")
+
+  read -rd $'\000' newlink <<EOF; arv link create --link "${newlink}"
+{
+  "tail_uuid":"${uuid_prefix}-j7d0g-fffffffffffffff",
+  "head_uuid":"${project_uuid}",
+  "link_class":"permission",
+  "name":"can_read"
+}
+EOF
+fi
+
+echo "Arvados project uuid is '${project_uuid}'"
+
+echo "Uploading arvados/jobs' docker image to the project"
+VERSION="2.1.1"
+arv-keepdocker --pull arvados/jobs "${VERSION}" --project-uuid "${project_uuid}"
+
+# Create the initial user
+echo "Creating initial user '__INITIAL_USER__'"
+user_uuid=$(arv --format=uuid user list --filters '[["email", "=", "__INITIAL_USER_EMAIL__"], ["username", "=", "__INITIAL_USER__"]]')
+
+if [ "x${user_uuid}" = "x" ]; then
+  user_uuid=$(arv --format=uuid user create --user '{"email": "__INITIAL_USER_EMAIL__", "username": "__INITIAL_USER__"}')
+  echo "Setting up user '__INITIAL_USER__'"
+  arv user setup --uuid "${user_uuid}"
+fi
+
+echo "Activating user '__INITIAL_USER__'"
+arv user update --uuid "${user_uuid}" --user '{"is_active": true}'
+
+echo "Getting the user API TOKEN"
+user_api_token=$(arv api_client_authorization list --filters "[[\"owner_uuid\", \"=\", \"${user_uuid}\"],[\"kind\", \"==\", \"arvados#apiClientAuthorization\"]]" --limit=1 |jq -r .items[].api_token)
+
+if [ "x${user_api_token}" = "x" ]; then
+  user_api_token=$(arv api_client_authorization create --api-client-authorization "{\"owner_uuid\": \"${user_uuid}\"}" | jq -r .api_token)
+fi
+
+# Change to the user's token and run the workflow
+export ARVADOS_API_TOKEN="${user_api_token}"
+
+echo "Running test CWL workflow"
+cwl-runner hasher-workflow.cwl hasher-workflow-job.yml
diff --git a/tools/salt-install/tests/test.txt b/tools/salt-install/tests/test.txt
new file mode 100644 (file)
index 0000000..a9c4395
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+test