18113: Merge branch 'main' into 18113-change-cloudops-defaults
author Ward Vandewege <ward@curii.com>
Thu, 7 Oct 2021 17:26:12 +0000 (13:26 -0400)
committer Ward Vandewege <ward@curii.com>
Thu, 7 Oct 2021 17:26:12 +0000 (13:26 -0400)
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

91 files changed:
build/run-build-packages.sh
build/run-tests.sh
cmd/arvados-server/cmd.go
cmd/arvados-server/keepstore.service [moved from services/keepstore/keepstore.service with 95% similarity]
doc/_includes/_gpg_key_fingerprint.liquid [moved from doc/_includes/_install_redhat_key.liquid with 82% similarity]
doc/_includes/_install_debian_key.liquid
doc/_includes/_install_ruby_and_bundler.liquid
doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
doc/install/install-keep-web.html.textile.liquid
doc/install/packages.html.textile.liquid
doc/install/singularity.html.textile.liquid
doc/user/topics/arvados-sync-groups.html.textile.liquid
lib/boot/supervisor.go
lib/config/config.default.yml
lib/config/generated_config.go
lib/crunchrun/singularity.go
sdk/cli/arvados-cli.gemspec
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/arvdocker.py
sdk/cwl/arvados_cwl/arvtool.py
sdk/cwl/arvados_cwl/executor.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/tests/test_container.py
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/wf/runin-reqs-wf.cwl
sdk/cwl/tests/wf/runin-reqs-wf2.cwl
sdk/cwl/tests/wf/runin-reqs-wf4.cwl
sdk/cwl/tests/wf/runin-reqs-wf5.cwl
sdk/cwl/tests/wf/runin-wf.cwl
sdk/cwl/tests/wf/scatter2_subwf.cwl
sdk/go/arvados/fs_project_test.go
sdk/go/arvados/fs_site_test.go
sdk/python/arvados/api.py
sdk/python/arvados/collection.py
sdk/python/arvados/keep.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_api.py
sdk/python/tests/test_keep_client.py
sdk/ruby/arvados.gemspec
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/models/group.rb
services/api/test/fixtures/groups.yml
services/keepstore/azure_blob_volume.go
services/keepstore/azure_blob_volume_test.go
services/keepstore/bufferpool.go
services/keepstore/bufferpool_test.go
services/keepstore/collision.go
services/keepstore/collision_test.go
services/keepstore/command.go
services/keepstore/command_test.go
services/keepstore/count.go
services/keepstore/gocheck_test.go
services/keepstore/handler_test.go
services/keepstore/handlers.go
services/keepstore/keepstore.go
services/keepstore/metrics.go
services/keepstore/mock_mutex_for_test.go
services/keepstore/mounts_test.go
services/keepstore/perms.go
services/keepstore/perms_test.go
services/keepstore/pipe_adapters.go
services/keepstore/proxy_remote.go
services/keepstore/proxy_remote_test.go
services/keepstore/pull_worker.go
services/keepstore/pull_worker_integration_test.go
services/keepstore/pull_worker_test.go
services/keepstore/s3_volume.go
services/keepstore/s3_volume_test.go
services/keepstore/s3aws_volume.go
services/keepstore/s3aws_volume_test.go
services/keepstore/stats_ticker.go
services/keepstore/status_test.go
services/keepstore/trash_worker.go
services/keepstore/trash_worker_test.go
services/keepstore/unix_volume.go
services/keepstore/unix_volume_test.go
services/keepstore/volume.go
services/keepstore/volume_generic_test.go
services/keepstore/volume_test.go
services/keepstore/work_queue.go
services/keepstore/work_queue_test.go
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/Dockerfile.demo
tools/arvbox/lib/arvbox/docker/createusers.sh
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run [changed from file to symlink]
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
tools/salt-install/tests/run-test.sh

index 7829c8c6cd61792535960a153bb20baf1b7e1622..26ed168aa6c60365109e63c8bfef83d8831dfe68 100755 (executable)
@@ -295,7 +295,7 @@ package_go_binary services/keep-balance keep-balance \
     "Rebalance and garbage-collect data blocks stored in Arvados Keep"
 package_go_binary services/keepproxy keepproxy \
     "Make a Keep cluster accessible to clients that are not on the LAN"
-package_go_binary services/keepstore keepstore \
+package_go_binary cmd/arvados-server keepstore \
     "Keep storage daemon, accessible to clients on the LAN"
 package_go_binary services/keep-web keep-web \
     "Static web hosting service for user data stored in Arvados Keep"
index 71da30ce43be5bc0b9e53fc2598fdc9face4e8c7..d318bc60de5fd233ef225074442f5e50e570ea18 100755 (executable)
@@ -1082,9 +1082,7 @@ install_deps() {
     do_install services/api
     do_install services/arv-git-httpd go
     do_install services/keepproxy go
-    do_install services/keepstore go
     do_install services/keep-web go
-    do_install services/ws go
 }
 
 install_all() {
index 4b94a7813869915c38c14ec7927a8a2662e30475..c8b945bea49c30d10270822ef18c58b22a92a103 100644 (file)
@@ -17,6 +17,7 @@ import (
        "git.arvados.org/arvados.git/lib/install"
        "git.arvados.org/arvados.git/lib/lsf"
        "git.arvados.org/arvados.git/lib/recovercollection"
+       "git.arvados.org/arvados.git/services/keepstore"
        "git.arvados.org/arvados.git/services/ws"
 )
 
@@ -37,6 +38,7 @@ var (
                "dispatch-lsf":       lsf.DispatchCommand,
                "install":            install.Command,
                "init":               install.InitCommand,
+               "keepstore":          keepstore.Command,
                "recover-collection": recovercollection.Command,
                "ws":                 ws.Command,
        })
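
The hunk above wires the keepstore service into the combined arvados-server binary by adding a "keepstore" entry to its handler map. A minimal, self-contained sketch of that dispatch-by-name pattern follows; the handler and multi types here are simplified stand-ins, not the real lib/cmd package.

    // Sketch of dispatching "arvados-server <command>" through a map of
    // registered handlers, as the hunk above does for keepstore.
    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // handler is a stand-in for the command interface each service implements.
    type handler interface {
    	RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int
    }

    type handlerFunc func(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int

    func (f handlerFunc) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
    	return f(prog, args, stdin, stdout, stderr)
    }

    // multi looks up the requested subcommand and runs it.
    func multi(handlers map[string]handler, args []string) int {
    	if len(args) < 2 {
    		fmt.Fprintln(os.Stderr, "usage: arvados-server <command> [options]")
    		return 2
    	}
    	h, ok := handlers[args[1]]
    	if !ok {
    		fmt.Fprintf(os.Stderr, "unknown command %q\n", args[1])
    		return 2
    	}
    	return h.RunCommand(args[1], args[2:], os.Stdin, os.Stdout, os.Stderr)
    }

    func main() {
    	handlers := map[string]handler{
    		// In the real binary this entry is keepstore.Command.
    		"keepstore": handlerFunc(func(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
    			fmt.Fprintln(stdout, "keepstore service would start here")
    			return 0
    		}),
    	}
    	os.Exit(multi(handlers, os.Args))
    }
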
similarity index 95%
rename from services/keepstore/keepstore.service
rename to cmd/arvados-server/keepstore.service
index 1f14c3f464c4b3a701a1f800e99534865a058908..bcfde3a7881f0c9d7a3217236d773e7845b2458c 100644 (file)
@@ -6,6 +6,7 @@
 Description=Arvados Keep Storage Daemon
 Documentation=https://doc.arvados.org/
 After=network.target
+AssertPathExists=/etc/arvados/config.yml
 
 # systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
similarity index 82%
rename from doc/_includes/_install_redhat_key.liquid
rename to doc/_includes/_gpg_key_fingerprint.liquid
index c9430515f16619ca1aeda22f387573d113aef6e4..a10fd8688db40d53638b9f068518e585181b91d2 100644 (file)
@@ -10,7 +10,6 @@ The Arvados signing key fingerprint is
 <pre><code>pub   rsa2048 2010-11-15 [SC]
       B2DA 2991 656E B4A5 0314  CA2B 5716 5911 1078 ECD7
 uid           [ unknown] Arvados Automatic Signing Key <sysadmin@arvados.org>
-uid           [ unknown] Curoverse, Inc Automatic Signing Key <sysadmin@curoverse.com>
 sub   rsa2048 2010-11-15 [E]
 </code></pre>
 </notextile>
index adfdff8733a3d779893a35089fb8c1e0c0025e99..1d5f73a4d28ef9d6863832668f6a8474f756204d 100644 (file)
@@ -4,10 +4,14 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+<notextile>
+<pre><code># <span class="userinput">apt-get --no-install-recommends install curl gnupg2</span>
+# <span class="userinput">curl https://apt.arvados.org/pubkey.gpg -o /etc/apt/trusted.gpg.d/arvados.asc</span>
+</code></pre>
+</notextile>
 
+The Arvados package signing GPG key is also available via the keyservers, though they can be unreliable. To retrieve the signing key via keyserver.ubuntu.com:
 
 <notextile>
-<pre><code># <span class="userinput">apt-get --no-install-recommends install gnupg</span>
-# <span class="userinput">/usr/bin/apt-key adv --keyserver pgp.mit.edu --recv 1078ECD7</span>
-</code></pre>
+<pre><code># <span class="userinput">/usr/bin/apt-key adv --keyserver keyserver.ubuntu.com --recv 1078ECD7</code></pre>
 </notextile>
index fe7714c62feae18bd12fa6c85890ec7ba14182a3..ffaa1a15833c2f2ea2a7526dd4926abce7e02a1a 100644 (file)
@@ -22,10 +22,10 @@ The Ruby version shipped with Centos 7 is too old.  Use "RVM":#rvm to install Ru
 
 h3. Debian and Ubuntu
 
-Debian 10 (buster) and Ubuntu 18.04 (bionic) and later ship with Ruby 2.5, which is supported by Arvados.
+Debian 10 (buster) and Ubuntu 18.04 (bionic) and later ship with Ruby 2.5 or newer, which is sufficient for Arvados.
 
 <notextile>
-<pre><code># <span class="userinput">apt-get --no-install-recommends install ruby ruby-dev bundler</span></code></pre>
+<pre><code># <span class="userinput">apt-get --no-install-recommends install ruby ruby-dev</span></code></pre>
 </notextile>
 
 h2(#rvm). Option 2: Install with RVM
@@ -44,34 +44,30 @@ h4. Debian and Ubuntu
 apt-get --no-install-recommends install gpg curl
 </pre>
 
-h3. Install RVM
+h3. Install RVM, Ruby and Bundler
 
 <notextile>
 <pre><code># <span class="userinput">gpg --keyserver pgp.mit.edu --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
 \curl -sSL https://get.rvm.io | bash -s stable --ruby=2.5
 </span></code></pre></notextile>
 
+This command installs the latest Ruby 2.5.x release, as well as the @gem@ and @bundle@ commands.
+
 To use Ruby installed from RVM, load it in an open shell like this:
 
 <notextile>
-<pre><code><span class="userinput">. /usr/local/rvm/scripts/rvm
+<pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
 </span></code></pre></notextile>
 
 Alternately you can use @rvm-exec@ (the first parameter is the ruby version to use, or "default"), for example:
 
 <notextile>
-<pre><code><span class="userinput">rvm-exec default rails console
+<pre><code><span class="userinput">rvm-exec default ruby -v
 </span></code></pre></notextile>
 
-Finally, install Bundler:
-
-<notextile>
-<pre><code>~$ <span class="userinput">gem install bundler</span>
-</code></pre></notextile>
-
 h2(#fromsource). Option 3: Install from source
 
-Install prerequisites for Debian 10:
+Install prerequisites for Debian 10, Ubuntu 18.04 and Ubuntu 20.04:
 
 <notextile>
 <pre><code><span class="userinput">sudo apt-get install \
@@ -89,25 +85,20 @@ Install prerequisites for CentOS 7:
     make automake libtool bison sqlite-devel tar
 </span></code></pre></notextile>
 
-Install prerequisites for Ubuntu 16.04:
-
-<notextile>
-<pre><code><span class="userinput">sudo apt-get install \
-    bison build-essential gettext libcurl3 \
-    libcurl3-openssl-dev libpcre3-dev libreadline-dev \
-    libssl-dev libxslt1.1 zlib1g-dev
-</span></code></pre></notextile>
-
 Build and install Ruby:
 
 <notextile>
 <pre><code><span class="userinput">mkdir -p ~/src
 cd ~/src
-curl -f http://cache.ruby-lang.org/pub/ruby/2.5/ruby-2.5.5.tar.gz | tar xz
-cd ruby-2.5.5
+curl -f http://cache.ruby-lang.org/pub/ruby/2.5/ruby-2.5.8.tar.gz | tar xz
+cd ruby-2.5.8
 ./configure --disable-install-rdoc
 make
 sudo make install
 
+# Make sure the post install script can find the gem and ruby executables
+sudo ln -s /usr/local/bin/gem /usr/bin/gem
+sudo ln -s /usr/local/bin/ruby /usr/bin/ruby
+# Install bundler
 sudo -i gem install bundler</span>
 </code></pre></notextile>
index 301fd7306ac82bbec8cc90978d831097f5937e7a..6fda506888f85692a03800269dec87a010178ba1 100644 (file)
@@ -13,18 +13,17 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 arvados-dispatch-lsf is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
-Containers can be dispatched to an LSF cluster.  The dispatcher sends work to the cluster using LSF's @bsub@ command, so it works in a variety of LSF configurations.
-
-*LSF support is currently considered experimental.*
+h2(#overview). Overview
 
-Limitations include:
-* Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.
-* Combining LSF with docker may not work, depending on LSF configuration and user/group IDs (if LSF only sets up the configured user's primary group ID when executing the crunch-run process on a compute node, it may not have permission to connect to the docker daemon).
+Containers can be dispatched to an LSF cluster.  The dispatcher sends work to the cluster using LSF's @bsub@ command, so it works in a variety of LSF configurations.
 
 In order to run containers, you must choose a user that has permission to set up FUSE mounts and run Singularity/Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
 
 Set up all of your compute nodes "as you would for a SLURM cluster":../crunch2-slurm/install-compute-node.html.
 
+*Current limitations*:
+* Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.
+* Combining LSF with docker may not work, depending on LSF configuration and user/group IDs (if LSF only sets up the configured user's primary group ID when executing the crunch-run process on a compute node, it may not have permission to connect to the docker daemon).
 
 h2(#update-config). Update config.yml
 
@@ -32,7 +31,6 @@ Arvados-dispatch-lsf reads the common configuration file at @/etc/arvados/config
 
 Review the following configuration parameters and adjust as needed.
 
-
 h3(#BsubSudoUser). Containers.LSF.BsubSudoUser
 
 arvados-dispatch-lsf uses @sudo@ to execute @bsub@, for example @sudo -E -u crunch bsub [...]@. This means the @crunch@ account must exist on the hosts where LSF jobs run ("execution hosts"), as well as on the host where you are installing the Arvados LSF dispatcher (the "submission host"). To use a user account other than @crunch@, configure @BsubSudoUser@:
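
The @sudo -E -u crunch bsub [...]@ form described above can be pictured with a short Go sketch. The submitViaBsub helper and its arguments are illustrative assumptions, not the dispatcher's actual code.

    // Sketch of submitting an LSF job as the configured BsubSudoUser by
    // wrapping bsub in "sudo -E -u <user>".
    package main

    import (
    	"log"
    	"os/exec"
    )

    // submitViaBsub runs bsub under sudo as the given user and returns its output.
    func submitViaBsub(sudoUser string, bsubArgs []string) ([]byte, error) {
    	args := append([]string{"-E", "-u", sudoUser, "bsub"}, bsubArgs...)
    	return exec.Command("sudo", args...).CombinedOutput()
    }

    func main() {
    	out, err := submitViaBsub("crunch", []string{"-J", "example-job", "true"})
    	if err != nil {
    		log.Fatalf("bsub submission failed: %v (output: %s)", err, out)
    	}
    	log.Printf("bsub output: %s", out)
    }
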
index 9f63d1bcfcf8ad3def1968700c58930c4e7a3ccb..ea2ffb5e4889a4329fa6cc94f0d9a474722aa7ba 100644 (file)
@@ -20,7 +20,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#introduction). Introduction
 
-The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":http://godoc.org/github.com/curoverse/arvados/services/keep-web for more detail.
+The Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":https://pkg.go.dev/git.arvados.org/arvados.git/services/keep-web for more detail.
 
 h2(#dns). Configure DNS
 
index fb296ad5ad47019024d49fe6e0bf61f30a48b0d5..a111843a6545f646d40064768967d6e01f005af4 100644 (file)
@@ -23,11 +23,11 @@ Packages are available for CentOS 7. To install them with yum, save this configu
 name=Arvados
 baseurl=http://rpm.arvados.org/CentOS/$releasever/os/$basearch/
 gpgcheck=1
-gpgkey=http://rpm.arvados.org/CentOS/RPM-GPG-KEY-curoverse
+gpgkey=http://rpm.arvados.org/CentOS/RPM-GPG-KEY-arvados
 </code></pre>
 </notextile>
 
-{% include 'install_redhat_key' %}
+{% include 'gpg_key_fingerprint' %}
 
 h3(#debian). Debian and Ubuntu
 
@@ -37,6 +37,8 @@ First, register the Arvados signing key in apt's database:
 
 {% include 'install_debian_key' %}
 
+{% include 'gpg_key_fingerprint' %}
+
 As root, add the Arvados package repository to your sources.  This command depends on your OS vendor and version:
 
 table(table table-bordered table-condensed).
index 1f382539589e588786893bad334bf4491a843da5..bd990b491b77741172d59e773673d99d97fbdc1d 100644 (file)
@@ -9,22 +9,25 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+h2(#overview). Overview
+
 Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a SLURM/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups.
 
-Please note:
-* *Singularity support is currently considered experimental.*
+*Current limitations*:
 * Even when using the singularity runtime, users' container images are expected to be saved in Docker format using @arv keep docker@. Arvados converts the Docker image to Singularity format (@.sif@) at runtime as needed. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.
 * Singularity does not limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by SLURM/LSF.
 * Programs running in containers may behave differently due to differences between Singularity and Docker.
 ** The root (image) filesystem is read-only in a Singularity container. Programs that attempt to write outside a designated output or temporary directory are likely to fail.
 ** The Docker ENTRYPOINT instruction is ignored.
-* Arvados is currently tested with Singularity version 3.5.2.
+* Arvados is tested with Singularity version 3.7.4. Other versions may not work.
+
+h2(#configuration). Configuration
 
-To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.5/user-guide/quick_start.html on your cloud worker image or SLURM/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
+To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or SLURM/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
 
 <notextile>
 <pre><code>$ <span class="userinput">singularity version</span>
-3.5.2
+3.7.4
 $ <span class="userinput">mksquashfs -version</span>
 mksquashfs version 4.3-git (2014/06/09)
 [...]
@@ -34,7 +37,7 @@ mksquashfs version 4.3-git (2014/06/09)
 Then update @Containers.RuntimeEngine@ in your cluster configuration:
 
 <notextile>
-<pre><code>      # Container runtime: "docker" (default) or "singularity" (experimental)
+<pre><code>      # Container runtime: "docker" (default) or "singularity"
       RuntimeEngine: singularity
 </code></pre>
 </notextile>
index 7d831bf04021633ec5802d2616baca31fa90e4f0..26be56782de0633f6a485b9b29624cabd60d4bd7 100644 (file)
@@ -32,11 +32,12 @@ The following command line options are supported:
 
 table(table table-bordered table-condensed).
 |_. Option |_. Description |
-|==--help==|             This list of options|
-|==--parent-group-uuid==|   UUID of group to own all the externally synchronized groups|
-|==--user-id== |            Identifier to use in looking up user. One of 'email' or 'username' (Default: 'email')|
-|==--verbose==|             Log informational messages (Default: False)|
-|==--version==|             Print version and exit|
+|==--help==|This list of options|
+|==--case-insensitive==|Uses case-insensitive username matching|
+|==--parent-group-uuid==|UUID of group to own all the externally synchronized groups|
+|==--user-id==|Identifier to use in looking up user. One of 'email' or 'username' (Default: 'email')|
+|==--verbose==|Log informational messages (Default: False)|
+|==--version==|Print version and exit|
 
 h2. Examples
 
index 2026b8c843fc16d12cc4eef6a0a547f3a1b9b164..2c89ccdb0018d1026b7ff9b7f2b14714f7fe8668 100644 (file)
@@ -245,7 +245,7 @@ func (super *Supervisor) run(cfg *arvados.Config) error {
                runGoProgram{src: "services/arv-git-httpd", svc: super.cluster.Services.GitHTTP},
                runGoProgram{src: "services/health", svc: super.cluster.Services.Health},
                runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
-               runGoProgram{src: "services/keepstore", svc: super.cluster.Services.Keepstore},
+               runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
                runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
                runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{seedDatabase{}}},
                installPassenger{src: "services/api"},
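
The keepstore entry above switches from runGoProgram to runServiceCommand, i.e. from building and running the service out of its source directory to invoking the subcommand compiled into arvados-server. A rough sketch of that distinction, using simplified stand-ins rather than the real lib/boot types:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    type supervisedTask interface {
    	Run() error
    }

    // runGoProgram: run the service from its Go source directory.
    type runGoProgram struct{ src string }

    func (t runGoProgram) Run() error {
    	return exec.Command("go", "run", "./"+t.src).Run()
    }

    // runServiceCommand: run a subcommand of the combined arvados-server binary.
    type runServiceCommand struct{ name string }

    func (t runServiceCommand) Run() error {
    	return exec.Command("arvados-server", t.name).Run()
    }

    func main() {
    	tasks := []supervisedTask{
    		runServiceCommand{name: "keepstore"},
    		runGoProgram{src: "services/keep-web"},
    	}
    	for _, t := range tasks {
    		fmt.Printf("would supervise %#v\n", t)
    	}
    }
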
index 740d75836ac4482cca86e036e7ce1cb6a39345cc..4e2a0e26d4bb599314e6f3292aeaf6cc1b2fef85 100644 (file)
@@ -683,7 +683,7 @@ Clusters:
         AcceptAccessTokenScope: ""
 
       PAM:
-        # (Experimental) Use PAM to authenticate users.
+        # Use PAM to authenticate users.
         Enable: false
 
         # PAM service name. PAM will apply the policy in the
@@ -881,8 +881,8 @@ Clusters:
       UsePreemptibleInstances: false
 
       # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
-      # (experimental) cloud dispatcher for executing containers on
-      # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+      # cloud dispatcher for executing containers on worker VMs.
+      # Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
       # and ends with "\n-----END RSA PRIVATE KEY-----\n".
       DispatchPrivateKey: ""
 
@@ -908,7 +908,7 @@ Clusters:
       # Minimum time between two attempts to run the same container
       MinRetryPeriod: 0s
 
-      # Container runtime: "docker" (default) or "singularity" (experimental)
+      # Container runtime: "docker" (default) or "singularity"
       RuntimeEngine: docker
 
       Logging:
@@ -1057,7 +1057,7 @@ Clusters:
         GitInternalDir: /var/lib/arvados/internal.git
 
       CloudVMs:
-        # Enable the cloud scheduler (experimental).
+        # Enable the cloud scheduler.
         Enable: false
 
         # Name/number of port where workers' SSH services listen.
index 5a46121a43289c7a11166eee4c9a501c72dcc18d..875939a3e191731d33892127f5376ed93c18af78 100644 (file)
@@ -689,7 +689,7 @@ Clusters:
         AcceptAccessTokenScope: ""
 
       PAM:
-        # (Experimental) Use PAM to authenticate users.
+        # Use PAM to authenticate users.
         Enable: false
 
         # PAM service name. PAM will apply the policy in the
@@ -887,8 +887,8 @@ Clusters:
       UsePreemptibleInstances: false
 
       # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
-      # (experimental) cloud dispatcher for executing containers on
-      # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+      # cloud dispatcher for executing containers on worker VMs.
+      # Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
       # and ends with "\n-----END RSA PRIVATE KEY-----\n".
       DispatchPrivateKey: ""
 
@@ -914,7 +914,7 @@ Clusters:
       # Minimum time between two attempts to run the same container
       MinRetryPeriod: 0s
 
-      # Container runtime: "docker" (default) or "singularity" (experimental)
+      # Container runtime: "docker" (default) or "singularity"
       RuntimeEngine: docker
 
       Logging:
@@ -1063,7 +1063,7 @@ Clusters:
         GitInternalDir: /var/lib/arvados/internal.git
 
       CloudVMs:
-        # Enable the cloud scheduler (experimental).
+        # Enable the cloud scheduler.
         Enable: false
 
         # Name/number of port where workers' SSH services listen.
index 99c5cef95c169155d9673b5e59c90f7f6d81e5a6..70ad653b7d5a8934a49a33532789c4ee77da3e85 100644 (file)
@@ -227,7 +227,7 @@ func (e *singularityExecutor) Create(spec containerSpec) error {
 }
 
 func (e *singularityExecutor) Start() error {
-       args := []string{"singularity", "exec", "--containall", "--no-home", "--cleanenv", "--pwd", e.spec.WorkingDir}
+       args := []string{"singularity", "exec", "--containall", "--cleanenv", "--pwd", e.spec.WorkingDir}
        if !e.spec.EnableNetwork {
                args = append(args, "--net", "--network=none")
        }
@@ -242,7 +242,12 @@ func (e *singularityExecutor) Start() error {
        sort.Strings(binds)
        for _, path := range binds {
                mount := e.spec.BindMounts[path]
-               args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+               if path == e.spec.Env["HOME"] {
+                       // Singularity treates $HOME as special case
+                       args = append(args, "--home", mount.HostPath+":"+path)
+               } else {
+                       args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
+               }
        }
 
        // This is for singularity 3.5.2. There are some behaviors
@@ -252,11 +257,11 @@ func (e *singularityExecutor) Start() error {
        env := make([]string, 0, len(e.spec.Env))
        for k, v := range e.spec.Env {
                if k == "HOME" {
-                       // $HOME is a special case
-                       args = append(args, "--home="+v)
-               } else {
-                       env = append(env, "SINGULARITYENV_"+k+"="+v)
+                       // Singularity treates $HOME as special case, this is handled
+                       // with --home above
+                       continue
                }
+               env = append(env, "SINGULARITYENV_"+k+"="+v)
        }
 
        args = append(args, e.imageFilename)
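
The hunks above change how the singularity command line is assembled: the bind mount matching $HOME is now passed with --home instead of --bind, and HOME is no longer exported as a SINGULARITYENV_* variable. A standalone sketch of that argument-building logic, with simplified types rather than the actual crunchrun package:

    package main

    import (
    	"fmt"
    	"sort"
    )

    type bindMount struct {
    	HostPath string
    	ReadOnly bool
    }

    // singularityArgs builds the exec arguments, treating the $HOME bind
    // mount as a special case handled via --home.
    func singularityArgs(env map[string]string, binds map[string]bindMount, image string) []string {
    	readonlyflag := map[bool]string{false: "rw", true: "ro"}
    	args := []string{"singularity", "exec", "--containall", "--cleanenv"}

    	paths := make([]string, 0, len(binds))
    	for path := range binds {
    		paths = append(paths, path)
    	}
    	sort.Strings(paths)
    	for _, path := range paths {
    		mount := binds[path]
    		if path == env["HOME"] {
    			args = append(args, "--home", mount.HostPath+":"+path)
    		} else {
    			args = append(args, "--bind", mount.HostPath+":"+path+":"+readonlyflag[mount.ReadOnly])
    		}
    	}
    	return append(args, image)
    }

    func main() {
    	args := singularityArgs(
    		map[string]string{"HOME": "/home/crunch"},
    		map[string]bindMount{
    			"/home/crunch": {HostPath: "/tmp/outdir"},
    			"/keep":        {HostPath: "/tmp/keep", ReadOnly: true},
    		},
    		"image.sif")
    	fmt.Println(args)
    }
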
index 08fcfe3a3490739b0fcbf1bc8063fe4c7e60b94e..1ff841acdd93e67c080c51a60dfe1ae9ea45055a 100644 (file)
@@ -41,8 +41,9 @@ Gem::Specification.new do |s|
   s.required_ruby_version = '>= 2.1.0'
   s.add_runtime_dependency 'arvados', '>= 1.4.1.20190320201707'
   # Our google-api-client dependency used to be < 0.9, but that could be
-  # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
-  s.add_runtime_dependency 'arvados-google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
+  # satisfied by the buggy 0.9.pre*, cf. https://dev.arvados.org/issues/9213
+  # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
+  s.add_runtime_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
   s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 5.3'
   s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
   s.add_runtime_dependency 'optimist', '~> 3.0'
@@ -51,8 +52,6 @@ Gem::Specification.new do |s|
   s.add_runtime_dependency 'oj', '< 3.10.9'
   s.add_runtime_dependency 'curb', '~> 0.8'
   s.add_runtime_dependency 'launchy', '< 2.5'
-  # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
-  s.add_dependency('faraday', '< 0.16')
   s.homepage    =
     'https://arvados.org'
 end
index ee636be371a4f385c13c6352d077527e362145db..71ef742e314633bab08b0f493f766227b19b7849 100644 (file)
@@ -301,7 +301,7 @@ def main(args, stdout, stderr, api_client=None, keep_client=None,
             api_client.users().current().execute()
         if keep_client is None:
             keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
-        executor = ArvCwlExecutor(api_client, arvargs, keep_client=keep_client, num_retries=4)
+        executor = ArvCwlExecutor(api_client, arvargs, keep_client=keep_client, num_retries=4, stdout=stdout)
     except WorkflowException as e:
         logger.error(e, exc_info=(sys.exc_info()[1] if arvargs.debug else False))
         return 1
index 1e79566f4055578ce61c0b37cd9c753429e1da51..ae3c6688955301141f0af3405787c9f831fb58b7 100644 (file)
@@ -283,11 +283,13 @@ class ArvadosContainer(JobBase):
         if self.output_ttl < 0:
             raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
 
-        storage_class_req, _ = self.get_requirement("http://arvados.org/cwl#OutputStorageClass")
-        if storage_class_req and storage_class_req.get("intermediateStorageClass"):
-            container_request["output_storage_classes"] = aslist(storage_class_req["intermediateStorageClass"])
-        else:
-            container_request["output_storage_classes"] = runtimeContext.intermediate_storage_classes.strip().split(",")
+
+        if self.arvrunner.api._rootDesc["revision"] >= "20210628":
+            storage_class_req, _ = self.get_requirement("http://arvados.org/cwl#OutputStorageClass")
+            if storage_class_req and storage_class_req.get("intermediateStorageClass"):
+                container_request["output_storage_classes"] = aslist(storage_class_req["intermediateStorageClass"])
+            else:
+                container_request["output_storage_classes"] = runtimeContext.intermediate_storage_classes.strip().split(",")
 
         if self.timelimit is not None and self.timelimit > 0:
             scheduling_parameters["max_run_time"] = self.timelimit
@@ -518,10 +520,10 @@ class RunnerContainer(Runner):
         if runtimeContext.debug:
             command.append("--debug")
 
-        if runtimeContext.storage_classes != "default":
+        if runtimeContext.storage_classes != "default" and runtimeContext.storage_classes:
             command.append("--storage-classes=" + runtimeContext.storage_classes)
 
-        if runtimeContext.intermediate_storage_classes != "default":
+        if runtimeContext.intermediate_storage_classes != "default" and runtimeContext.intermediate_storage_classes:
             command.append("--intermediate-storage-classes=" + runtimeContext.intermediate_storage_classes)
 
         if self.on_error:
index 3c820827132e378ca88efe4eca9e76ea7df468ef..26408317cbe6d0cdb5382f38c1455b0bd7b5db2a 100644 (file)
@@ -49,8 +49,10 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid
         if not images:
             # Fetch Docker image if necessary.
             try:
-                cwltool.docker.DockerCommandLineJob.get_image(dockerRequirement, pull_image,
+                result = cwltool.docker.DockerCommandLineJob.get_image(dockerRequirement, pull_image,
                                                               force_pull, tmp_outdir_prefix)
+                if not result:
+                    raise WorkflowException("Docker image '%s' not available" % dockerRequirement["dockerImageId"])
             except OSError as e:
                 raise WorkflowException("While trying to get Docker image '%s', failed to execute 'docker': %s" % (dockerRequirement["dockerImageId"], e))
 
index 83648f46aa89424652323729b0241e85d2d125e8..b66e8ad3aac6b73b3bb086a60a1403c8a6cf7a64 100644 (file)
@@ -62,7 +62,7 @@ class ArvadosCommandTool(CommandLineTool):
         (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
         if not docker_req:
             self.hints.append({"class": "DockerRequirement",
-                               "dockerImageId": "arvados/jobs:"+__version__})
+                               "dockerPull": "arvados/jobs:"+__version__})
 
         self.arvrunner = arvrunner
 
index edb9d5b523c09bee4aa43f16705e27f2f15194d9..aa19633d8c7e86067a02d823d0c638abb318ad4d 100644 (file)
@@ -99,7 +99,8 @@ class ArvCwlExecutor(object):
                  arvargs=None,
                  keep_client=None,
                  num_retries=4,
-                 thread_count=4):
+                 thread_count=4,
+                 stdout=sys.stdout):
 
         if arvargs is None:
             arvargs = argparse.Namespace()
@@ -132,6 +133,7 @@ class ArvCwlExecutor(object):
         self.should_estimate_cache_size = True
         self.fs_access = None
         self.secret_store = None
+        self.stdout = stdout
 
         if keep_client is not None:
             self.keep_client = keep_client
@@ -549,7 +551,7 @@ The 'jobs' API is no longer supported.
         if runtimeContext.submit_request_uuid and self.work_api != "containers":
             raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
 
-        default_storage_classes = ",".join([k for k,v in self.api.config()["StorageClasses"].items() if v.get("Default") is True])
+        default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
         if runtimeContext.storage_classes == "default":
             runtimeContext.storage_classes = default_storage_classes
         if runtimeContext.intermediate_storage_classes == "default":
@@ -602,14 +604,15 @@ The 'jobs' API is no longer supported.
         if existing_uuid or runtimeContext.create_workflow:
             # Create a pipeline template or workflow record and exit.
             if self.work_api == "containers":
-                return (upload_workflow(self, tool, job_order,
+                uuid = upload_workflow(self, tool, job_order,
                                         self.project_uuid,
                                         uuid=existing_uuid,
                                         submit_runner_ram=runtimeContext.submit_runner_ram,
                                         name=runtimeContext.name,
                                         merged_map=merged_map,
-                                        submit_runner_image=runtimeContext.submit_runner_image),
-                        "success")
+                                        submit_runner_image=runtimeContext.submit_runner_image)
+                self.stdout.write(uuid + "\n")
+                return (None, "success")
 
         self.apply_reqs(job_order, tool)
 
@@ -679,7 +682,8 @@ The 'jobs' API is no longer supported.
         if runtimeContext.submit and not runtimeContext.wait:
             runnerjob = next(jobiter)
             runnerjob.run(runtimeContext)
-            return (runnerjob.uuid, "success")
+            self.stdout.write(runnerjob.uuid+"\n")
+            return (None, "success")
 
         current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
         if current_container:
index 66dff809e477b56960f2e49317ff3996f2f9058c..ada64ae69aa62f0c1e4487bd28160c071fb3d0a1 100644 (file)
@@ -105,7 +105,8 @@ def make_builder(joborder, hints, requirements, runtimeContext, metadata):
                  outdir="",              # type: Text
                  tmpdir="",              # type: Text
                  stagedir="",            # type: Text
-                 cwlVersion=metadata.get("http://commonwl.org/cwltool#original_cwlVersion") or metadata.get("cwlVersion")
+                 cwlVersion=metadata.get("http://commonwl.org/cwltool#original_cwlVersion") or metadata.get("cwlVersion"),
+                 container_engine="docker"
                 )
 
 def search_schemadef(name, reqs):
@@ -183,7 +184,7 @@ def set_secondary(fsaccess, builder, inputschema, secondaryspec, primary, discov
             elif isinstance(pattern, dict):
                 specs.append(pattern)
             elif isinstance(pattern, str):
-                specs.append({"pattern": pattern})
+                specs.append({"pattern": pattern, "required": sf.get("required")})
             else:
                 raise SourceLine(primary["secondaryFiles"], i, validate.ValidationException).makeError(
                     "Expression must return list, object, string or null")
@@ -192,7 +193,9 @@ def set_secondary(fsaccess, builder, inputschema, secondaryspec, primary, discov
         for i, sf in enumerate(specs):
             if isinstance(sf, dict):
                 if sf.get("class") == "File":
-                    pattern = sf["basename"]
+                    pattern = None
+                    sfpath = sf["location"]
+                    required = True
                 else:
                     pattern = sf["pattern"]
                     required = sf.get("required")
@@ -203,11 +206,16 @@ def set_secondary(fsaccess, builder, inputschema, secondaryspec, primary, discov
                 raise SourceLine(primary["secondaryFiles"], i, validate.ValidationException).makeError(
                     "Expression must return list, object, string or null")
 
-            sfpath = substitute(primary["location"], pattern)
+            if pattern is not None:
+                sfpath = substitute(primary["location"], pattern)
+
             required = builder.do_eval(required, context=primary)
 
             if fsaccess.exists(sfpath):
-                found.append({"location": sfpath, "class": "File"})
+                if pattern is not None:
+                    found.append({"location": sfpath, "class": "File"})
+                else:
+                    found.append(sf)
             elif required:
                 raise SourceLine(primary["secondaryFiles"], i, validate.ValidationException).makeError(
                     "Required secondary file '%s' does not exist" % sfpath)
index 3f1f8a6bed1b3ee5a1e883ecded075d89df62b5a..0bde76e92ff5d96974c93fd34c76fa19da3e003c 100644 (file)
@@ -39,8 +39,8 @@ setup(name='arvados-cwl-runner',
       # file to determine what version of cwltool and schema-salad to
       # build.
       install_requires=[
-          'cwltool==3.1.20210816212154',
-          'schema-salad==8.2.20210902094147',
+          'cwltool==3.1.20210922203925',
+          'schema-salad==8.2.20210918131710',
           'arvados-python-client{}'.format(pysdk_dep),
           'setuptools',
           'ciso8601 >= 2.0.0',
index 8a380ff80b3c811ab2c8e050392f679674b2b20b..1a2bd112f37d15822fa9d3edc25107146f352825 100644 (file)
@@ -112,6 +112,7 @@ class TestContainer(unittest.TestCase):
             runner.ignore_docker_for_reuse = False
             runner.intermediate_output_ttl = 0
             runner.secret_store = cwltool.secrets.SecretStore()
+            runner.api._rootDesc = {"revision": "20210628"}
 
             keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
             runner.api.collections().get().execute.return_value = {
@@ -175,6 +176,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 3600
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -270,6 +272,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -398,6 +401,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -622,6 +626,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -712,6 +717,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -808,6 +814,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -851,6 +858,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
@@ -924,6 +932,7 @@ class TestContainer(unittest.TestCase):
         runner.ignore_docker_for_reuse = False
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
+        runner.api._rootDesc = {"revision": "20210628"}
 
         keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
         runner.api.collections().get().execute.return_value = {
index 8c0fcaf7419f40a9a679a2e3d569e7f4af1648db..77f70851e8dd86d1682f209e1eb27cb08d17f12d 100644 (file)
@@ -87,6 +87,7 @@ def stubs(func):
         stubs.api = mock.MagicMock()
         stubs.api._rootDesc = get_rootDesc()
         stubs.api._rootDesc["uuidPrefix"] = "zzzzz"
+        stubs.api._rootDesc["revision"] = "20210628"
 
         stubs.api.users().current().execute.return_value = {
             "uuid": stubs.fake_user_uuid,
@@ -446,7 +447,7 @@ class TestSubmit(unittest.TestCase):
                 "enableReuse": False,
             },
         ]
-        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
             "arv": "http://arvados.org/cwl#",
             "cwltool": "http://commonwl.org/cwltool#"
         }
@@ -572,6 +573,7 @@ class TestSubmit(unittest.TestCase):
     def test_default_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
         final_output_c = arvados.collection.Collection()
         make_output.return_value = ({},final_output_c)
+        stubs.api.config().get.return_value = {"default": {"Default": True}}
 
         def set_final_output(job_order, output_callback, runtimeContext):
             output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
@@ -1032,7 +1034,7 @@ class TestSubmit(unittest.TestCase):
                 "keep_cache": 512
             }
         ]
-        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
             "arv": "http://arvados.org/cwl#",
         }
         expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
@@ -1126,9 +1128,6 @@ class TestSubmit(unittest.TestCase):
                     "content": {
                         "$graph": [
                             {
-                                "$namespaces": {
-                                    "cwltool": "http://commonwl.org/cwltool#"
-                                },
                                 "arguments": [
                                     "md5sum",
                                     "example.conf"
@@ -1217,6 +1216,9 @@ class TestSubmit(unittest.TestCase):
                                 ]
                             }
                         ],
+                        "$namespaces": {
+                            "cwltool": "http://commonwl.org/cwltool#"
+                        },
                         "cwlVersion": "v1.0"
                     },
                     "kind": "json"
@@ -1445,7 +1447,7 @@ class TestSubmit(unittest.TestCase):
                 ],
             }
         ]
-        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
             "arv": "http://arvados.org/cwl#"
         }
 
index 3cc52936eaa89781599920d43fe2130b322bd46b..22cc82b7f33b970134f8faac4577794b190d2f44 100644 (file)
@@ -24,7 +24,7 @@ requirements:
   StepInputExpressionRequirement: {}
 hints:
   DockerRequirement:
-    dockerPull: arvados/jobs:1.4.0.20190604172024
+    dockerPull: arvados/jobs:2.2.2
 steps:
   substep:
     in:
index 7d06cb308cf6ba8c65d51882865e90b39acae6c4..4bde6c562c1ed66fbbabbe1a9f509c6fcee9fe57 100644 (file)
@@ -24,7 +24,7 @@ requirements:
   StepInputExpressionRequirement: {}
 hints:
   DockerRequirement:
-    dockerPull: arvados/jobs:1.4.0.20190604172024
+    dockerPull: arvados/jobs:2.2.2
 steps:
   substep:
     in:
index 9a26d01132cf07d6ae1cb11aff97a627807ddca5..d00ee857756854eaacc29b4d918c7f96e3c949c7 100644 (file)
@@ -24,7 +24,7 @@ requirements:
   StepInputExpressionRequirement: {}
 hints:
   DockerRequirement:
-    dockerPull: arvados/jobs:1.4.0.20190604172024
+    dockerPull: arvados/jobs:2.2.2
 steps:
   substep:
     in:
index 34d7b2c39a05ffa5110e6b5b7d853930b901aa24..647b07edfa6409a727e5ec595ad5a3ae7d34c5bb 100644 (file)
@@ -24,7 +24,7 @@ requirements:
   StepInputExpressionRequirement: {}
 hints:
   DockerRequirement:
-    dockerPull: arvados/jobs:1.4.0.20190604172024
+    dockerPull: arvados/jobs:2.2.2
 steps:
   substep:
     in:
index 68a26a0d361a36fe609b981af3b180901a632331..f819d0fe68a66a220fba98d1908b03b72308822a 100644 (file)
@@ -36,7 +36,7 @@ steps:
     hints:
       - class: arv:RunInSingleContainer
       - class: DockerRequirement
-        dockerPull: arvados/jobs:1.4.0.20190604172024
+        dockerPull: arvados/jobs:2.2.2
     run:
       class: Workflow
       id: mysub
index c54e1707ff0ef7fdb741305ff6804bb19cfdbaac..218b0c5018df3963596a65854a499d2e0c7e2295 100644 (file)
@@ -86,5 +86,8 @@
       ]
     }
   ],
+  "$namespaces": {
+    "arv": "http://arvados.org/cwl#"
+  },
   "cwlVersion": "v1.0"
-}
\ No newline at end of file
+}
index 0564e2fae61a2c85e68d677b0344572014e184e8..f68e7c8b08e97b4dd2fdd9bd9252ecd563060b59 100644 (file)
@@ -73,6 +73,30 @@ func (s *SiteFSSuite) TestFilterGroup(c *check.C) {
 
        _, err = s.fs.OpenFile("/fg3/A Subproject", 0, 0)
        c.Assert(err, check.Not(check.IsNil))
+
+       // An 'exists' 'arvados#collection' filter means only collections with certain properties should be returned.
+       s.fs.MountProject("fg4", fixtureAFilterGroupFourUUID)
+
+       _, err = s.fs.Stat("/fg4/collection with list property with odd values")
+       c.Assert(err, check.IsNil)
+
+       _, err = s.fs.Stat("/fg4/collection with list property with even values")
+       c.Assert(err, check.IsNil)
+
+       // A 'contains' 'arvados#collection' filter means only collections with certain properties should be returned.
+       s.fs.MountProject("fg5", fixtureAFilterGroupFiveUUID)
+
+       _, err = s.fs.Stat("/fg5/collection with list property with odd values")
+       c.Assert(err, check.IsNil)
+
+       _, err = s.fs.Stat("/fg5/collection with list property with string value")
+       c.Assert(err, check.IsNil)
+
+       _, err = s.fs.Stat("/fg5/collection with prop2 5")
+       c.Assert(err, check.Not(check.IsNil))
+
+       _, err = s.fs.Stat("/fg5/collection with list property with even values")
+       c.Assert(err, check.Not(check.IsNil))
 }
 
 func (s *SiteFSSuite) TestCurrentUserHome(c *check.C) {
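
The new assertions above mount filter groups by UUID and check which collections their property filters expose. A small sketch of that usage pattern; the siteFS interface is a stand-in for the filesystem type used in the test suite.

    package main

    import (
    	"fmt"
    	"os"
    )

    type siteFS interface {
    	MountProject(mountpoint, uuid string)
    	Stat(name string) (os.FileInfo, error)
    }

    // listFilterGroup mounts the filter group and reports whether each
    // candidate collection name is visible through its filters.
    func listFilterGroup(fs siteFS, uuid string, names []string) {
    	fs.MountProject("fg", uuid)
    	for _, name := range names {
    		if _, err := fs.Stat("/fg/" + name); err == nil {
    			fmt.Printf("visible: %s\n", name)
    		} else {
    			fmt.Printf("filtered out: %s (%v)\n", name, err)
    		}
    	}
    }

    func main() {
    	// With a real site filesystem in place of the siteFS stand-in, this
    	// would report which collections match the group's filters, e.g.:
    	//   listFilterGroup(fs, "zzzzz-j7d0g-filtergroupfour",
    	//       []string{"collection with list property with odd values",
    	//                "collection with prop2 5"})
    	_ = listFilterGroup
    }
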
index 3c7c146f6975f26e36e9966ee0c17be7171a9dc6..51ca88764e6625dd25428a526f325e6acf138af8 100644 (file)
@@ -21,6 +21,8 @@ const (
        fixtureThisFilterGroupUUID          = "zzzzz-j7d0g-thisfiltergroup"
        fixtureAFilterGroupTwoUUID          = "zzzzz-j7d0g-afiltergrouptwo"
        fixtureAFilterGroupThreeUUID        = "zzzzz-j7d0g-filtergroupthre"
+       fixtureAFilterGroupFourUUID         = "zzzzz-j7d0g-filtergroupfour"
+       fixtureAFilterGroupFiveUUID         = "zzzzz-j7d0g-filtergroupfive"
        fixtureFooAndBarFilesInDirUUID      = "zzzzz-4zz18-foonbarfilesdir"
        fixtureFooCollectionName            = "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
        fixtureFooCollectionPDH             = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
index 86d24dfc06a7edf8cf73ac315904f8b4bfb3a908..88596211d4e06b028e6974df2d31a9eee1cf7e19 100644 (file)
@@ -14,6 +14,7 @@ import logging
 import os
 import re
 import socket
+import ssl
 import sys
 import time
 import types
@@ -57,58 +58,67 @@ class OrderedJsonModel(apiclient.model.JsonModel):
 
 
 def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
-    if (self.max_request_size and
-        kwargs.get('body') and
-        self.max_request_size < len(kwargs['body'])):
-        raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
-
-    if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
-        headers['X-External-Client'] = '1'
-
-    headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
     if not headers.get('X-Request-Id'):
         headers['X-Request-Id'] = self._request_id()
+    try:
+        if (self.max_request_size and
+            kwargs.get('body') and
+            self.max_request_size < len(kwargs['body'])):
+            raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
 
-    retryable = method in [
-        'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT']
-    retry_count = self._retry_count if retryable else 0
-
-    if (not retryable and
-        time.time() - self._last_request_time > self._max_keepalive_idle):
-        # High probability of failure due to connection atrophy. Make
-        # sure this request [re]opens a new connection by closing and
-        # forgetting all cached connections first.
-        for conn in self.connections.values():
-            conn.close()
-        self.connections.clear()
-
-    delay = self._retry_delay_initial
-    for _ in range(retry_count):
-        self._last_request_time = time.time()
-        try:
-            return self.orig_http_request(uri, method, headers=headers, **kwargs)
-        except http.client.HTTPException:
-            _logger.debug("Retrying API request in %d s after HTTP error",
-                          delay, exc_info=True)
-        except socket.error:
-            # This is the one case where httplib2 doesn't close the
-            # underlying connection first.  Close all open
-            # connections, expecting this object only has the one
-            # connection to the API server.  This is safe because
-            # httplib2 reopens connections when needed.
-            _logger.debug("Retrying API request in %d s after socket error",
-                          delay, exc_info=True)
+        if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
+            headers['X-External-Client'] = '1'
+
+        headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
+
+        retryable = method in [
+            'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT']
+        retry_count = self._retry_count if retryable else 0
+
+        if (not retryable and
+            time.time() - self._last_request_time > self._max_keepalive_idle):
+            # High probability of failure due to connection atrophy. Make
+            # sure this request [re]opens a new connection by closing and
+            # forgetting all cached connections first.
             for conn in self.connections.values():
                 conn.close()
-        except httplib2.SSLHandshakeError as e:
-            # Intercept and re-raise with a better error message.
-            raise httplib2.SSLHandshakeError("Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e))
+            self.connections.clear()
+
+        delay = self._retry_delay_initial
+        for _ in range(retry_count):
+            self._last_request_time = time.time()
+            try:
+                return self.orig_http_request(uri, method, headers=headers, **kwargs)
+            except http.client.HTTPException:
+                _logger.debug("[%s] Retrying API request in %d s after HTTP error",
+                              headers['X-Request-Id'], delay, exc_info=True)
+            except ssl.SSLCertVerificationError as e:
+                raise ssl.SSLCertVerificationError(e.args[0], "Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e)) from None
+            except socket.error:
+                # This is the one case where httplib2 doesn't close the
+                # underlying connection first.  Close all open
+                # connections, expecting this object only has the one
+                # connection to the API server.  This is safe because
+                # httplib2 reopens connections when needed.
+                _logger.debug("[%s] Retrying API request in %d s after socket error",
+                              headers['X-Request-Id'], delay, exc_info=True)
+                for conn in self.connections.values():
+                    conn.close()
+
+            time.sleep(delay)
+            delay = delay * self._retry_delay_backoff
 
-        time.sleep(delay)
-        delay = delay * self._retry_delay_backoff
-
-    self._last_request_time = time.time()
-    return self.orig_http_request(uri, method, headers=headers, **kwargs)
+        self._last_request_time = time.time()
+        return self.orig_http_request(uri, method, headers=headers, **kwargs)
+    except Exception as e:
+        # Prepend "[request_id] " to the error message, which we
+        # assume is the first string argument passed to the exception
+        # constructor.
+        for i in range(len(e.args or ())):
+            if type(e.args[i]) == type(""):
+                e.args = e.args[:i] + ("[{}] {}".format(headers['X-Request-Id'], e.args[i]),) + e.args[i+1:]
+                raise type(e)(*e.args)
+        raise
 
 def _patch_http_request(http, api_token):
     http.arvados_api_token = api_token
index 50cb703a56a5a0dc66a068593fc4d3ed4a855166..d03265ca44b1b6e886e57d455dadcb7a613d0b6e 100644 (file)
@@ -1344,8 +1344,8 @@ class Collection(RichCollectionBase):
 
             try:
                 self._populate()
-            except (IOError, errors.SyntaxError) as e:
-                raise errors.ArgumentError("Error processing manifest text: %s", e)
+            except errors.SyntaxError as e:
+                raise errors.ArgumentError("Error processing manifest text: %s", str(e)) from None
 
     def storage_classes_desired(self):
         return self._storage_classes_desired or []
@@ -1790,7 +1790,13 @@ class Collection(RichCollectionBase):
                             self.find_or_create(os.path.join(stream_name, name[:-2]), COLLECTION)
                     else:
                         filepath = os.path.join(stream_name, name)
-                        afile = self.find_or_create(filepath, FILE)
+                        try:
+                            afile = self.find_or_create(filepath, FILE)
+                        except IOError as e:
+                            if e.errno == errno.ENOTDIR:
+                                raise errors.SyntaxError("Dir part of %s conflicts with file of the same name.", filepath) from None
+                            else:
+                                raise e from None
                         if isinstance(afile, ArvadosFile):
                             afile.add_segment(blocks, pos, size)
                         else:
index bc07851835e2471ee9f1055b689fe6a789ea4d62..0018687ff35a585c33ce07378acb7f05e0b98522 100644 (file)
@@ -1080,6 +1080,13 @@ class KeepClient(object):
 
         self.get_counter.add(1)
 
+        request_id = (request_id or
+                      (hasattr(self, 'api_client') and self.api_client.request_id) or
+                      arvados.util.new_request_id())
+        if headers is None:
+            headers = {}
+        headers['X-Request-Id'] = request_id
+
         slot = None
         blob = None
         try:
@@ -1096,12 +1103,6 @@ class KeepClient(object):
 
             self.misses_counter.add(1)
 
-            if headers is None:
-                headers = {}
-            headers['X-Request-Id'] = (request_id or
-                                        (hasattr(self, 'api_client') and self.api_client.request_id) or
-                                        arvados.util.new_request_id())
-
             # If the locator has hints specifying a prefix (indicating a
             # remote keepproxy) or the UUID of a local gateway service,
             # read data from the indicated service(s) instead of the usual
@@ -1171,14 +1172,14 @@ class KeepClient(object):
                           for key in sorted_roots)
         if not roots_map:
             raise arvados.errors.KeepReadError(
-                "failed to read {}: no Keep services available ({})".format(
-                    loc_s, loop.last_result()))
+                "[{}] failed to read {}: no Keep services available ({})".format(
+                    request_id, loc_s, loop.last_result()))
         elif not_founds == len(sorted_roots):
             raise arvados.errors.NotFoundError(
-                "{} not found".format(loc_s), service_errors)
+                "[{}] {} not found".format(request_id, loc_s), service_errors)
         else:
             raise arvados.errors.KeepReadError(
-                "failed to read {} after {}".format(loc_s, loop.attempts_str()), service_errors, label="service")
+                "[{}] failed to read {} after {}".format(request_id, loc_s, loop.attempts_str()), service_errors, label="service")
 
     @retry.retry_method
     def put(self, data, copies=2, num_retries=None, request_id=None, classes=None):
@@ -1215,10 +1216,11 @@ class KeepClient(object):
             return loc_s
         locator = KeepLocator(loc_s)
 
+        request_id = (request_id or
+                      (hasattr(self, 'api_client') and self.api_client.request_id) or
+                      arvados.util.new_request_id())
         headers = {
-            'X-Request-Id': (request_id or
-                             (hasattr(self, 'api_client') and self.api_client.request_id) or
-                             arvados.util.new_request_id()),
+            'X-Request-Id': request_id,
             'X-Keep-Desired-Replicas': str(copies),
         }
         roots_map = {}
@@ -1275,15 +1277,15 @@ class KeepClient(object):
             return writer_pool.response()
         if not roots_map:
             raise arvados.errors.KeepWriteError(
-                "failed to write {}: no Keep services available ({})".format(
-                    data_hash, loop.last_result()))
+                "[{}] failed to write {}: no Keep services available ({})".format(
+                    request_id, data_hash, loop.last_result()))
         else:
             service_errors = ((key, roots_map[key].last_result()['error'])
                               for key in sorted_roots
                               if roots_map[key].last_result()['error'])
             raise arvados.errors.KeepWriteError(
-                "failed to write {} after {} (wanted {} copies but wrote {})".format(
-                    data_hash, loop.attempts_str(), (copies, classes), writer_pool.done()), service_errors, label="service")
+                "[{}] failed to write {} after {} (wanted {} copies but wrote {})".format(
+                    request_id, data_hash, loop.attempts_str(), (copies, classes), writer_pool.done()), service_errors, label="service")
 
     def local_store_put(self, data, copies=1, num_retries=None, classes=[]):
         """A stub for put().
index c022e6c874d23f099406b626374e7cfe5d0951c0..6d2643a967ef70374f2ff222c19bb67917ba5a0e 100644 (file)
@@ -473,7 +473,7 @@ def _start_keep(n, blob_signing=False):
     confdata['Clusters']['zzzzz']['Collections']['BlobSigning'] = blob_signing
     with open(conf, 'w') as f:
         yaml.safe_dump(confdata, f)
-    keep_cmd = ["keepstore", "-config", conf]
+    keep_cmd = ["arvados-server", "keepstore", "-config", conf]
 
     with open(_logfilename('keep{}'.format(n)), WRITE_MODE) as logf:
         with open('/dev/null') as _stdin:
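
A small illustrative sketch of the change above: keepstore now runs as a subcommand of the arvados-server binary rather than as a standalone "keepstore" executable. The config path here is an assumption for the example:

    import subprocess

    keep_cmd = ["arvados-server", "keepstore", "-config", "/tmp/keep0/keep0.yml"]
    # Launch keepstore in the background, discarding its output for brevity.
    proc = subprocess.Popen(keep_cmd,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
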
index 0c4677e8a26e1245c2049bc11e7e25fa5e0d7b22..c249f46d3c8d8b2352d444a8be915534bd5c2316 100644 (file)
@@ -82,6 +82,19 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
         for msg in ["Bad UUID format", "Bad output format"]:
             self.assertIn(msg, err_s)
 
+    @mock.patch('time.sleep')
+    def test_exceptions_include_request_id(self, sleep):
+        api = arvados.api('v1')
+        api.request_id='fake-request-id'
+        api._http.orig_http_request = mock.MagicMock()
+        api._http.orig_http_request.side_effect = socket.error('mock error')
+        caught = None
+        try:
+            api.users().current().execute()
+        except Exception as e:
+            caught = e
+        self.assertRegex(str(caught), r'fake-request-id')
+
     def test_exceptions_without_errors_have_basic_info(self):
         mock_responses = {
             'arvados.humans.delete': (
index b2160e549b538655eb5863907d87fb1560ce3ba5..aa7e371bf47223773b40008fca6b924d87627ac6 100644 (file)
@@ -703,6 +703,23 @@ class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock):
             self.keep_client.head(self.locator)
         self.assertAutomaticRequestId(mock.responses[0])
 
+    def test_request_id_in_exception(self):
+        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:
+            with self.assertRaisesRegex(arvados.errors.KeepReadError, self.test_id):
+                self.keep_client.head(self.locator, request_id=self.test_id)
+
+        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:
+            with self.assertRaisesRegex(arvados.errors.KeepReadError, r'req-[a-z0-9]{20}'):
+                self.keep_client.get(self.locator)
+
+        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:
+            with self.assertRaisesRegex(arvados.errors.KeepWriteError, self.test_id):
+                self.keep_client.put(self.data, request_id=self.test_id)
+
+        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:
+            with self.assertRaisesRegex(arvados.errors.KeepWriteError, r'req-[a-z0-9]{20}'):
+                self.keep_client.put(self.data)
+
     def assertAutomaticRequestId(self, resp):
         hdr = [x for x in resp.getopt(pycurl.HTTPHEADER)
                if x.startswith('X-Request-Id: ')][0]
index 7cc2fd931c8cc3bd7c0446953e0eb49b23f09f85..b196a1c33e9feb7278ac5513afcd1b75740a9075 100644 (file)
@@ -41,13 +41,14 @@ Gem::Specification.new do |s|
   s.add_dependency('activesupport', '>= 3')
   s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
   # Our google-api-client dependency used to be < 0.9, but that could be
-  # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
-  s.add_dependency('arvados-google-api-client', '>= 0.7', '< 0.8.9')
+  # satisfied by the buggy 0.9.pre*, cf. https://dev.arvados.org/issues/9213
+  # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
+  s.add_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
   # work around undeclared dependency on i18n in some activesupport 3.x.x:
   s.add_dependency('i18n', '~> 0')
   s.add_dependency('json', '>= 1.7.7', '<3')
-  # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
-  s.add_dependency('faraday', '< 0.16')
+  # Avoid warning on Ruby 2.7, cf. https://dev.arvados.org/issues/18247
+  s.add_dependency('faraday', '>= 0.17.4')
   s.add_runtime_dependency('jwt', '<2', '>= 0.1.5')
   s.homepage    =
     'https://arvados.org'
index 6c547262471496f035b684df9ece2a226cdadd7d..c1d4b74d6dfab1d76b84cf680aaf50ad2487da30 100644 (file)
@@ -37,7 +37,7 @@ class Arvados::V1::SchemaController < ApplicationController
         # format is YYYYMMDD, must be fixed width (needs to be lexically
         # sortable), updated manually, may be used by clients to
         # determine availability of API server features.
-        revision: "20210503",
+        revision: "20210628",
         source_version: AppVersion.hash,
         sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
         packageVersion: AppVersion.package_version,
index fd2f5f18c2ac8018b152307042e82d587f8290a7..8565b2a417efc0a28d611c2ff1ed873acfc39a8c 100644 (file)
@@ -84,8 +84,8 @@ class Group < ArvadosModel
           errors.add :properties, "when filter operator is 'is_a', attribute must be 'uuid'"
           return
         end
-        if ! ["=","<","<=",">",">=","!=","like","ilike","in","not in","is_a"].include?(filter[1].downcase)
-          errors.add :properties, "filter operator is not valid (must be =,<,<=,>,>=,!=,like,ilike,in,not in,is_a)"
+        if ! ["=","<","<=",">",">=","!=","like","ilike","in","not in","is_a","exists","contains"].include?(filter[1].downcase)
+          errors.add :properties, "filter operator is not valid (must be =,<,<=,>,>=,!=,like,ilike,in,not in,is_a,exists,contains)"
           return
         end
       end
index 48925a27027a7cbd96dba6bd5bfa5eeb67757928..9a2dc169b63aec6ff8d624bf4128c69483e0ce3b 100644 (file)
@@ -146,6 +146,32 @@ afiltergroup3:
   properties:
     filters: [["uuid", "is_a", "arvados#collection"]]
 
+afiltergroup4:
+  uuid: zzzzz-j7d0g-filtergroupfour
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A filter group with an exists collections filter
+  group_class: filter
+  properties:
+    filters: [["collections.properties.listprop","exists",true],["uuid", "is_a", "arvados#collection"]]
+
+afiltergroup5:
+  uuid: zzzzz-j7d0g-filtergroupfive
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A filter group with a contains collections filter
+  group_class: filter
+  properties:
+    filters: [["collections.properties.listprop","contains","elem1"],["uuid", "is_a", "arvados#collection"]]
+
 future_project_viewing_group:
   uuid: zzzzz-j7d0g-futrprojviewgrp
   owner_uuid: zzzzz-tpzed-000000000000000
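
A hedged Python SDK sketch of creating a filter group that uses the newly accepted "exists" and "contains" operators; the property name "listprop" follows the fixtures above, and a configured Arvados client is required:

    import arvados

    api = arvados.api('v1')
    group = api.groups().create(body={'group': {
        'name': 'collections whose listprop contains elem1',
        'group_class': 'filter',
        'properties': {
            'filters': [
                ['collections.properties.listprop', 'contains', 'elem1'],
                ['uuid', 'is_a', 'arvados#collection'],
            ],
        },
    }}).execute()
    print(group['uuid'])
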
index 6e8def82222eb1e169af6fa2632c21b20bc1bdf8..cf655c2a5a96d9ca92194321ad813fd5712ab523 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 3f395e40caba6be7f336470c76b2e492e1a42386..48d58ee9bfc454e5b2972e6d36867a578c29e6bb 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 623693cd12e507002d8ee29272af8185fe6965fb..b4cc5d38e1670034212816bd96b95cdc838a2cfb 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "sync"
index 72b719ba28a2b4e69f429308df4513270cbd36d8..13e1cb4f332ba180857aef747b3086e9251466ee 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index 4d6583b26b7bb4e91a27d4dcb7e398f849c1c521..16f2d0923244b138a64eb970fef1a70dc477532e 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 0d6fd62833b296d2650e8b8d6829ca95179b995b..aa8f0cbaa1b6e43c64adb67513d6725e2cf24489 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index bf3bf1722c0cee546488ac45c0793554be905706..2a426936ed064ae3e1d3b1367157300797e793dc 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
@@ -30,10 +30,6 @@ var (
        Command = service.Command(arvados.ServiceNameKeepstore, newHandlerOrErrorHandler)
 )
 
-func main() {
-       os.Exit(runCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
-}
-
 func runCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        args, ok := convertKeepstoreFlagsToServiceFlags(args, ctxlog.FromContext(context.Background()))
        if !ok {
index ad2aa0957143540a01070e134abbc566b3f767e1..bbfae52f69e1feb2a4b109e7eedd1a16da984023 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index ccba4f1355c2963ed0218d2c0577432bc21ff6c6..272b5017cb0d8785a0446a5426aaec3d2c8c3230 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "io"
index 89d680fc12ca2b496fe484f9f14d24356cdbfc43..90076db5b2fd344734839d112fa4c8d31d392ae8 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "gopkg.in/check.v1"
index 16dcd2aaf6ee5d57e9bb60176a643a9116df8f9e..cbb7f38bb14100468710b74f68d28e6513b6f4d7 100644 (file)
@@ -11,7 +11,7 @@
 // The HTTP handlers are responsible for enforcing permission policy,
 // so these tests must exercise all possible permission permutations.
 
-package main
+package keepstore
 
 import (
        "bytes"
index 910033ebb1d8408c90a4bde441d7edc8d99b109a..29e7b2ca9c499e0b5eae3461084775ef7af40506 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "container/list"
index 3c9d5d15e8134cd91779bf3e9304f9511cdf8d05..b9dbe2777e5d3ccca440b1c7632e6a344b83fa35 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "time"
index b2f0aa663872df877c6d64a17fe3ebd3c75f335a..d04601fbec84128ff47cf65ea15588aa6212b9c5 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "fmt"
index 484b177230b3cdb2c581e654993bfd8dfd1027b2..daf0ef05f7cd7041250ee994f85b51cbc08ca462 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 type MockMutex struct {
        AllowLock   chan struct{}
index b60bf9658bbc5a4ca00de5250be57582905014df..e8c248219f77785458110107922983b0917fa51d 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 7b962641e6c8561bf784608589f5ba46b4c7fef7..7205a4594deee1964d6d4d2dda95e7fa68c8b7a4 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "time"
index 5460f8eda66f2b040fd82b050c18b7b8086adaaa..13223747063cd79850454088d5cb5ae08823f00b 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "strconv"
index 69ed6d2ff5f1f8d80bd6c6e6ebe7d75f7e4ff259..6b555054b65ea0ed8f537401d97c7f7765186722 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 8c88a406f4f39fb998be7edcab8cf16d598caa3f..526bc25299373ba56d5a7d27f445551c940edcb7 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index 00161bf236eadfdd8f6881d4fe1a2194e91cf145..534371cc0ece83ef3a0cead670d1612ec8f57172 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index 57b9469244d8e101193185bd190d6db9ce37f853..abe3dc3857d5a1652562f29bc361b4f2c95e49ca 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index eb7fe5fd6730421937b8c1efb49e4f201f6c1ca2..3855b4ecd3cfd01c1a0589c4b01f3ceb5e17d929 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 2013c8c04da76ffbf8e22f85d501ecf4710cbfdc..2626e66d8898745b9f29c42d9beda9ee580626a4 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 4c43b3f469532c25af6cea40c3dbf94ed548b31e..ee89b156f796b49395fc6ec151de6ab12127e176 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bufio"
index 5cb8a668a3123abc6175e0ce4c8bd7604967ae11..a82098356859cb3cc481d20df453efb97e1726d0 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index fb2e97efb71d25f3d5a1726337a80ae63fc534c4..cb0b73cb8b5b495549ca87948f510e95f4751838 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index fa9e270e0b330b72749d8284da340f0bf8276b64..c7e2d485dfc6f793f107947a3340606c993f63ac 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 342b9e32058e23a1f09fc305d8fdc37caf104198..520d4530c2ac2404ca47f561b3dd08c015f1e832 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "sync"
index cafe9f72ff0e0eaf3ec7b1da842c5ac083ca4028..80f98adb225bbd829dca5fcc9753e02e333d7225 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "encoding/json"
index 4063765726013da7e45ba66fbb2d727e403b6b00..3909d90d9204d55a80252449abb88e953ace1b24 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "errors"
index 2b1ee2be13bb1466d6af5be09bd53067fd40d221..4e20c3feb451f1f3043008ae69813ee38a9bcf14 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "container/list"
index 1706473cc892c43cbd5ad27751c49f43cbebc075..a74616604814488ed9c54fa3e5026f2d4f37c907 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index 166085809848b862a734096a4f1ff9c28223fdde..75d9b22de55604cc01a2d1f6f4ffaad7b9b585a7 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index cf0b7a390255b64d7e108b9e18b93fab4d3e0a73..c3b8cd6283e0311c93fcc914ebd3b370045cfa0f 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "context"
index a31b861236a9ebe564e91d7b64e804b9a5c3686e..0dd34e3af1be878e2602a9bf2e43fcfed29c4eb6 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index cc2d21e5a94fc30669dc808e6e6bade48613672a..950b3989aa0f6a72e20553f8505f6575a91b39c4 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "bytes"
index 56c6376ad37134d0ed7f9f12cc049255b83fcb65..4c46ec8e65f6cf546324d8f0f6e25056d65de9a9 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 /* A WorkQueue is an asynchronous thread-safe queue manager.  It
    provides a channel from which items can be read off the queue, and
index 8a26c090c9dd5cedc690f34a9f879f5eb842a40e..254f96cb2d853293f8e42f24f0177f55e37652fa 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepstore
 
 import (
        "container/list"
index 516ea3534166e30eb8ab52c1a469af65cf45dfd6..36a33376a1892fb88f5458e18a1d3f59484c27e0 100755 (executable)
@@ -526,6 +526,7 @@ case "$subcmd" in
             echo "Status: running"
             echo "Container IP: $(getip)"
             echo "Published host: $(gethost)"
+           echo "Workbench: https://$(gethost)"
         else
             echo "Status: not running"
         fi
index c112972c4303103a6fee1fc920fa309022b340ee..4556652563681cfb0618d854917b640b21f6d425 100644 (file)
@@ -105,7 +105,7 @@ RUN apt-key add --no-tty /tmp/8D81803C0EBFCD88.asc && \
 RUN mkdir -p /etc/apt/sources.list.d && \
     echo deb https://download.docker.com/linux/debian/ buster stable > /etc/apt/sources.list.d/docker.list && \
     apt-get update && \
-    apt-get -yq --no-install-recommends install docker-ce=5:19.03.13~3-0~debian-buster && \
+    apt-get -yq --no-install-recommends install docker-ce=5:20.10.6~3-0~debian-buster && \
     apt-get clean
 
 # Set UTF-8 locale
index cb0dc2d652d9a8c54fb7db3bd39cb9255015faff..ffd8a8748fb2d3422c9693e5932c5d07b5079378 100644 (file)
@@ -47,7 +47,6 @@ RUN sudo -u arvbox /var/lib/arvbox/service/doc/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/vm/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/keepproxy/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/arv-git-httpd/run-service --only-deps
-RUN /var/lib/arvbox/service/crunch-dispatch-local/run --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/websockets/run --only-deps
 RUN sudo -u arvbox /usr/local/lib/arvbox/keep-setup.sh --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/sdk/run-service
index 7cf58e201d1e27ca2492d9ace8d9d241d1c4dc41..66a4ff474768da2428e0404e021ce0d1ab6f4d13 100755 (executable)
@@ -42,6 +42,13 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
     mkdir -p /tmp/crunch0 /tmp/crunch1
     chown crunch:crunch -R /tmp/crunch0 /tmp/crunch1
 
+    # singularity needs to be owned by root and suid
+    chown root /var/lib/arvados/bin/singularity \
+         /var/lib/arvados/etc/singularity/singularity.conf \
+         /var/lib/arvados/etc/singularity/capability.json \
+         /var/lib/arvados/etc/singularity/ecl.toml
+    chmod u+s /var/lib/arvados/bin/singularity
+
     echo "arvbox    ALL=(crunch) NOPASSWD: ALL" >> /etc/sudoers
 
     cat <<EOF > /etc/profile.d/paths.sh
deleted file mode 100755 (executable)
index 3ce2220d0e26d5dc70705e8c8cafb1a7303225ae..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -ex -o pipefail
-
-# singularity can use suid
-chown root /var/lib/arvados/bin/singularity \
-      /var/lib/arvados/etc/singularity/singularity.conf \
-      /var/lib/arvados/etc/singularity/capability.json \
-      /var/lib/arvados/etc/singularity/ecl.toml
-chmod u+s /var/lib/arvados/bin/singularity
-
-exec /usr/local/lib/arvbox/runsu.sh $0-service $1
new file mode 120000 (symlink)
index 0000000000000000000000000000000000000000..a388c8b67bf16bbb16601007540e58f1372ebc85
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
index fda1545a05bcf8048b47f2838a058ccd7c542ffb..f3bc09f65036c7349e8f9f9fa1cd21746c25cdec 100644 (file)
@@ -20,6 +20,7 @@ postgres:
     - postgresql12-contrib
 
   {%- else %}
+  use_upstream_repo: false
   pkgs_extra:
     - postgresql-contrib
   {%- endif %}
index 91617e4fa4765e5e3365a4269937ac6987a94d17..4aa9bb62ec7045284bd2e7d34bd065adc9e2939e 100644 (file)
@@ -36,6 +36,19 @@ arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_in
       - openssl
       - ca-certificates
 
+# Remove the RANDFILE parameter from openssl.cnf, as it makes openssl fail on Ubuntu 18.04.
+# Saving and restoring the RNG state is no longer necessary with the openssl 1.1.1
+# random generator, cf.
+#   https://github.com/openssl/openssl/issues/7754
+#
+arvados_test_salt_states_examples_single_host_snakeoil_certs_file_comment_etc_openssl_conf:
+  file.comment:
+    - name: /etc/ssl/openssl.cnf
+    - regex: ^RANDFILE.*
+    - onlyif: grep -q ^RANDFILE /etc/ssl/openssl.cnf
+    - require_in:
+      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run
+
 arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run:
   # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
   cmd.run:
index 020efa94e8f61303e06da5d087ecd712f9f1991f..a47294b3bd15c9874803c5c4aef9d2e765d83afb 100755 (executable)
@@ -69,4 +69,4 @@ echo "Switching to user '__INITIAL_USER__'"
 export ARVADOS_API_TOKEN="${user_api_token}"
 
 echo "Running test CWL workflow"
-cwl-runner hasher-workflow.cwl hasher-workflow-job.yml
+cwl-runner --debug hasher-workflow.cwl hasher-workflow-job.yml