17840: Merge branch 'main'
author Tom Clegg <tom@curii.com>
Tue, 16 Nov 2021 20:28:06 +0000 (15:28 -0500)
committer Tom Clegg <tom@curii.com>
Tue, 16 Nov 2021 20:28:06 +0000 (15:28 -0500)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

75 files changed:
.gitignore
apps/workbench/Gemfile.lock
cmd/arvados-package/install.go
doc/Rakefile
doc/_config.yml
doc/_includes/_metadata_vocabulary_example.liquid [moved from doc/_includes/_wb2_vocabulary_example.liquid with 90% similarity]
doc/_includes/_tutorial_expectations.liquid
doc/admin/metadata-vocabulary.html.textile.liquid [moved from doc/admin/workbench2-vocabulary.html.textile.liquid with 75% similarity]
doc/admin/upgrading.html.textile.liquid
doc/admin/user-activity.html.textile.liquid
doc/api/permission-model.html.textile.liquid
doc/api/projects.html.textile.liquid
doc/architecture/keep-components-overview.html.textile.liquid
doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
doc/install/arvados-on-kubernetes.html.textile.liquid
doc/install/crunch2/install-compute-node-docker.html.textile.liquid
doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
doc/install/install-workbench2-app.html.textile.liquid
doc/install/salt-multi-host.html.textile.liquid
doc/user/topics/arvados-sync-groups.html.textile.liquid
doc/user/topics/collection-versioning.html.textile.liquid
lib/config/config.default.yml
lib/config/export.go
lib/config/generated_config.go
lib/controller/federation.go
lib/controller/federation/conn.go
lib/controller/federation/federation_test.go
lib/controller/federation/generate.go
lib/controller/federation/generated.go
lib/controller/federation/login_test.go
lib/controller/federation/user_test.go
lib/controller/handler.go
lib/controller/handler_test.go
lib/controller/integration_test.go
lib/controller/localdb/collection.go
lib/controller/localdb/collection_test.go
lib/controller/localdb/conn.go
lib/controller/localdb/container_request.go [new file with mode: 0644]
lib/controller/localdb/container_request_test.go [new file with mode: 0644]
lib/controller/localdb/group.go [new file with mode: 0644]
lib/controller/localdb/group_test.go [new file with mode: 0644]
lib/controller/localdb/link.go [new file with mode: 0644]
lib/controller/localdb/link_test.go [new file with mode: 0644]
lib/controller/router/response.go
lib/controller/router/router.go
lib/controller/router/router_test.go
lib/controller/rpc/conn.go
lib/crunchrun/crunchrun.go
lib/crunchrun/crunchrun_test.go
lib/install/deps.go
lib/install/deps_go_version_test.go [new file with mode: 0644]
sdk/go/arvados/api.go
sdk/go/arvados/config.go
sdk/go/arvados/container.go
sdk/go/arvados/link.go
sdk/go/arvados/vocabulary.go [new file with mode: 0644]
sdk/go/arvados/vocabulary_test.go [new file with mode: 0644]
sdk/go/arvadostest/api.go
sdk/python/arvados/collection.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_collections.py
services/api/Gemfile.lock
services/api/app/models/user.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/unit/permission_test.rb
services/api/test/unit/user_test.rb
services/fuse/arvados_fuse/command.py
services/fuse/arvados_fuse/fusedir.py
services/fuse/arvados_fuse/fusefile.py
services/fuse/tests/mount_test_base.py
services/fuse/tests/test_mount.py
services/keepstore/unix_volume.go
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base

index beb84b3c2034f23e7c3072ac510f4a43722a0c75..231424accd37d1549e1edf3d066aa93a135dfa31 100644 (file)
@@ -32,5 +32,6 @@ services/api/config/arvados-clients.yml
 .Rproj.user
 _version.py
 *.bak
+*.log
 arvados-snakeoil-ca.pem
 .vagrant
index ab9256a38688e1af3f7ac2e5905108cea38a9ebb..13c4430965c3244bee92111d5ee7238cfe6f0797 100644 (file)
@@ -178,7 +178,7 @@ GEM
       mime-types-data (~> 3.2015)
     mime-types-data (3.2019.0331)
     mini_mime (1.1.0)
-    mini_portile2 (2.5.3)
+    mini_portile2 (2.6.1)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -194,8 +194,8 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.7)
-      mini_portile2 (~> 2.5.0)
+    nokogiri (1.12.5)
+      mini_portile2 (~> 2.6.1)
       racc (~> 1.4)
     npm-rails (0.2.1)
       rails (>= 3.2)
@@ -214,7 +214,7 @@ GEM
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
     public_suffix (4.0.6)
-    racc (1.5.2)
+    racc (1.6.0)
     rack (2.2.3)
     rack-mini-profiler (1.0.2)
       rack (>= 1.2.0)
index 85c64b867e2b7abfd69af14f78aad431ca61e185..5fdb7a87563aaec1323f9ec26553387c49b4e889 100644 (file)
@@ -45,14 +45,14 @@ func testinstall(ctx context.Context, opts opts, stdin io.Reader, stdout, stderr
                        opts.TargetOS,
                        "bash", "-c", `
 set -e -o pipefail
-apt-get update
+apt-get --allow-releaseinfo-change update
 apt-get install -y --no-install-recommends dpkg-dev eatmydata
 
 mkdir /tmp/pkg
 ln -s /pkg/*.deb /tmp/pkg/
 (cd /tmp/pkg; dpkg-scanpackages --multiversion . | gzip > Packages.gz)
 echo >/etc/apt/sources.list.d/arvados-local.list "deb [trusted=yes] file:///tmp/pkg ./"
-apt-get update
+apt-get --allow-releaseinfo-change update
 
 eatmydata apt-get install -y --no-install-recommends arvados-server-easy postgresql
 eatmydata apt-get remove -y dpkg-dev
@@ -88,7 +88,7 @@ rm /etc/apt/sources.list.d/arvados-local.list
                "bash", "-c", `
 set -e -o pipefail
 PATH="/var/lib/arvados/bin:$PATH"
-apt-get update
+apt-get --allow-releaseinfo-change update
 apt-get install -y --no-install-recommends dpkg-dev
 mkdir /tmp/pkg
 ln -s /pkg/*.deb /tmp/pkg/
@@ -97,7 +97,7 @@ apt-get remove -y dpkg-dev
 echo
 
 echo >/etc/apt/sources.list.d/arvados-local.list "deb [trusted=yes] file:///tmp/pkg ./"
-apt-get update
+apt-get --allow-releaseinfo-change update
 eatmydata apt-get install --reinstall -y --no-install-recommends arvados-server-easy`+versionsuffix+`
 SUDO_FORCE_REMOVE=yes apt-get autoremove -y
 
index 2b4b6af2e0cb2385a95835033fc69c90c5dd8ce3..4427f7822e855e0a0a9d10e5ebb140168114f2b9 100644 (file)
@@ -160,7 +160,8 @@ task :linkchecker => [ :generate ] do
   Dir.chdir(".site") do
     `which linkchecker`
     if $? == 0
-      system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
+      # we need --check-extern to check relative links, weird but true
+      system "linkchecker index.html --check-extern --ignore-url='!file://'" or exit $?.exitstatus
     else
       puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
     end
index 31db9c41d54eb82fbb57c3fbc798f4cffeaa5e69..dde87323d778e47f7e6297da784a1995d9f9726d 100644 (file)
@@ -194,7 +194,7 @@ navbar:
       - admin/keep-balance.html.textile.liquid
       - admin/controlling-container-reuse.html.textile.liquid
       - admin/logs-table-management.html.textile.liquid
-      - admin/workbench2-vocabulary.html.textile.liquid
+      - admin/metadata-vocabulary.html.textile.liquid
       - admin/storage-classes.html.textile.liquid
       - admin/keep-recovering-data.html.textile.liquid
       - admin/keep-measuring-deduplication.html.textile.liquid
similarity index 90%
rename from doc/_includes/_wb2_vocabulary_example.liquid
rename to doc/_includes/_metadata_vocabulary_example.liquid
index ee2ac97ef3cf5a7d4af57b39f53f0a88dfedcffe..fb8e57725bb17d6795d83941863333748098365d 100644 (file)
@@ -1,4 +1,8 @@
-{
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}{
     "strict_tags": false,
     "tags": {
         "IDTAGANIMALS": {
index 09b18f0d4d662ac805f22edfbbe594867a40245d..d4d05078f6ce86f8ac564b11049ce282539b16f2 100644 (file)
@@ -5,5 +5,5 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin' %}
-This tutorial assumes that you have access to the "Arvados command line tools":/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
+This tutorial assumes that you have access to the "Arvados command line tools":{{ site.baseurl }}/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
 {% include 'notebox_end' %}
similarity index 75%
rename from doc/admin/workbench2-vocabulary.html.textile.liquid
rename to doc/admin/metadata-vocabulary.html.textile.liquid
index 9a8d7fcd015b795144ca21e277be6379fa34a84f..170699ab6c36d3207ad08fe36d6a6631dce9d1f4 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: admin
-title: User properties vocabulary
+title: Metadata vocabulary
 ...
 
 {% comment %}
@@ -12,17 +12,19 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Many Arvados objects (like collections and projects) can store metadata as properties that in turn can be used in searches allowing a flexible way of organizing data inside the system.
 
-The Workbench2 user interface enables the site adminitrator to set up a properties vocabulary formal definition so that users can select from predefined key/value pairs of properties, offering the possibility to add different terms for the same concept.
+Arvados enables the site administrator to set up a formal metadata vocabulary definition so that users can select from predefined key/value pairs of properties, allowing different terms for the same concept to be offered in client UIs such as Workbench2.
 
-h2. Workbench2 configuration
+The Controller service loads and caches the configured vocabulary file in memory at startup time, exporting it on a dedicated endpoint. It periodically checks the local copy for updates and refreshes its cache if validation passes.
 
-Workbench2 retrieves the vocabulary file URL from the cluster config as shown:
+h2. Configuration
+
+The site administrator should place the JSON vocabulary file on the same host as the controller service and set up the config file as follows:
 
 <notextile>
 <pre><code>Cluster:
   zzzzz:
-    Workbench:
-      VocabularyURL: <span class="userinput">https://site.example.com/vocabulary.json</span>
+    API:
+      VocabularyPath: <span class="userinput">/etc/arvados/vocabulary.json</span>
 </code></pre>
 </notextile>
 
@@ -35,10 +37,12 @@ Keys and values are indexed by identifiers so that the concept of a term is pres
 The following is an example of a vocabulary definition:
 
 {% codeblock as json %}
-{% include 'wb2_vocabulary_example' %}
+{% include 'metadata_vocabulary_example' %}
 {% endcodeblock %}
 
-If the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. Take notice that this restriction is at the client level on Workbench2, it doesn't limit the user's ability to set any arbitrary property via other means (e.g. Python SDK or CLI commands)
+For clients to be able to query the vocabulary definition, a special endpoint is exposed on the @controller@ service: @/arvados/v1/vocabulary@. This endpoint doesn't require authentication and returns the vocabulary definition in JSON format.
+
+If the @strict_tags@ flag at the root level is @true@, users will be restricted from saving property keys other than the ones defined in the vocabulary. This restriction is enforced at the backend level to ensure consistency across different clients.
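
(Editor's illustration, not part of this commit: a minimal Go sketch of a client querying the unauthenticated vocabulary endpoint described above. The controller host name is a placeholder.)

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The endpoint needs no authentication and returns the vocabulary definition as JSON.
	resp, err := http.Get("https://zzzzz.example.com/arvados/v1/vocabulary")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var voc struct {
		StrictTags bool                       `json:"strict_tags"`
		Tags       map[string]json.RawMessage `json:"tags"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&voc); err != nil {
		panic(err)
	}
	fmt.Println("strict_tags:", voc.StrictTags, "- defined keys:", len(voc.Tags))
}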
 
 Inside the @tags@ member, IDs are defined (@IDTAGANIMALS@, @IDTAGCOMMENT@, @IDTAGIMPORTANCES@) and can have any format that the current application requires. Every key will declare at least a @labels@ list with zero or more label objects.
 
index 0aea90bd0ddab04164ee6a668e26c1851ab6e934..c1a7ae87dec28d0c9a439334eabc4c42a98f6180 100644 (file)
@@ -35,10 +35,14 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
-h2(#main). development main (as of 2021-10-27)
+h2(#main). development main (as of 2021-11-10)
 
 "previous: Upgrading from 2.3.0":#v2_3_0
 
+h3. Users are visible to other users by default
+
+When a new user is set up (either via the @AutoSetupNewUsers@ config or via the Workbench admin interface), the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.
+
 h3. Dedicated keepstore process for each container
 
 When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
@@ -46,6 +50,12 @@ When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ sup
 * If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.
 * This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.
 
+h3. Backend support for vocabulary checking
+
+If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs and setting the @API.VocabularyPath@ configuration parameter to the local path where the file was placed.
+This enables vocabulary checking cluster-wide, including on Workbench2. The @Workbench.VocabularyURL@ configuration parameter is deprecated and will be removed in a future release.
+You can read more about how this feature works on the "admin page":{{site.baseurl}}/admin/metadata-vocabulary.html.
+
 h2(#v2_3_0). v2.3.0 (2021-10-27)
 
 "previous: Upgrading to 2.2.0":#v2_2_0
@@ -292,7 +302,7 @@ Workbench 2 is now ready for regular use.  Follow the instructions to "install w
 
 h3. New property vocabulary format for Workbench2
 
-(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format and it isn't compatible with the previous one, please read the "workbench2 vocabulary format admin page":{{site.baseurl}}/admin/workbench2-vocabulary.html for more information.
+(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format that isn't compatible with the previous one; please read the "metadata vocabulary format admin page":{{site.baseurl}}/admin/metadata-vocabulary.html for more information.
 
 h3. Cloud installations only: node manager replaced by arvados-dispatch-cloud
 
@@ -407,7 +417,7 @@ h3. Python packaging change
 
 As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.
 
-One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
+One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
 
 Python scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment.
 
index 21bfb7655ca997f51c5f268d94ac7979b9786c3b..01715ff6e3484877598834e985fdb1576fc0c505 100644 (file)
@@ -17,7 +17,7 @@ h2. Option 1: Install from a distribution package
 
 This installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 2, below.
 
-First, configure the "Arvados package repositories":../../install/packages.html
+First, configure the "Arvados package repositories":{{ site.baseurl }}/install/packages.html
 
 {% assign arvados_component = 'python3-arvados-user-activity' %}
 
@@ -31,7 +31,7 @@ Step 2: Change directory to @arvados/tools/user-activity@
 
 Step 3: Run @pip install .@ in an appropriate installation environment, such as a @virtualenv@.
 
-Note: depends on the "Arvados Python SDK":../sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
+Note: depends on the "Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
 
 h2. Usage
 
index 82e8128c6e80445aed6f96409aa0165cd06c74b4..a44d2eefa13ac7cc43e89776d9773169f3b5fe4e 100644 (file)
@@ -46,7 +46,7 @@ This grants the permission in @name@ for @tail_uuid@ accessing @head_uuid@.
 
 If a User has *can_manage* permission on some object, the user has the ability to read, create, update and delete permission links with @head_uuid@ of the managed object.  In other words, the user has the ability to modify the permission grants on the object.
 
-The *can_login* @name@ is only meaningful on a permission link with with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the CLI cheat sheet":/install/cheat_sheet.html#vm-login for an example.
+The *can_login* @name@ is only meaningful on a permission link with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the 'User management at the CLI' page":{{ site.baseurl }}/admin/user-management-cli.html#vm-login for an example.
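
(Editor's illustration, not part of this commit: a hedged Go sketch of creating a permission link through the links API that this commit routes through controller. The host, UUIDs, and token handling are placeholders.)

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Grant can_read on an object (head_uuid) to a user or group (tail_uuid).
	body, err := json.Marshal(map[string]interface{}{
		"link": map[string]string{
			"link_class": "permission",
			"name":       "can_read", // or can_write / can_manage
			"tail_uuid":  "zzzzz-tpzed-xxxxxxxxxxxxxxx",
			"head_uuid":  "zzzzz-4zz18-xxxxxxxxxxxxxxx",
		},
	})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "https://zzzzz.example.com/arvados/v1/links", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("ARVADOS_API_TOKEN"))
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}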
 
 h3. Transitive permissions
 
@@ -66,7 +66,7 @@ A "project" is a subtype of Group that is displayed as a "Project" in Workbench,
 * The name of a project is unique only among projects and filters with the same owner_uuid.
 * Projects can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@).  Putting a project in a @tail_uuid@ field is an error.
 
-A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. See "the groups API documentation":/api/methods/groups.html for more information.
+A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. See "the groups API documentation":{{ site.baseurl }}/api/methods/groups.html for more information.
 * A filter group cannot own things (cannot appear in @owner_uuid@).  Putting a filter group in an @owner_uuid@ field is an error.
 * A filter group can be owned by a user or a project.
 * The name of a filter is unique only among projects and filters with the same owner_uuid.
index b1c74fe0d729794f26c2b535989f9dedf21c462b..9aa3d85d4d5297adfc91d396a9f8b518d9ff831e 100644 (file)
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados @projects@ are used to organize objects. Projects can contain @collections@, @container requests@, @workflows@, etc. Projects can also contain other projects. An object is part of a project if the @owner_uuid@ of the object is set to the uuid of the project.
 
-Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":/api/methods/groups.html.
+Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":{{ site.baseurl }}/api/methods/groups.html.
 
 Projects can be manipulated via Workbench, the cli tools, the SDKs, and the Arvados APIs.
 
index b07716aacf09b522781f8e5754c3fc30a2e3f591..4b1ca9b8458d27c518c53c3703bd7be7b9a71560 100644 (file)
@@ -14,13 +14,13 @@ Keep has a number of components. This page describes each component and the role
 h3. Keep clients for data access
 
 In order to access data in Keep, a client is needed to store data in and retrieve data from Keep. Different types of Keep clients exist:
-* a command line client like "@arv-get@":/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":/user/tutorials/tutorial-keep.html#upload-using-command
-* a FUSE mount provided by "@arv-mount@":/user/tutorials/tutorial-keep-mount-gnu-linux.html
+* a command line client like "@arv-get@":{{ site.baseurl }}/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":{{ site.baseurl }}/user/tutorials/tutorial-keep.html#upload-using-command
+* a FUSE mount provided by "@arv-mount@":{{ site.baseurl }}/user/tutorials/tutorial-keep-mount-gnu-linux.html
 * a WebDAV mount provided by @keep-web@
 * an S3-compatible endpoint provided by @keep-web@
-* programmatic access via the "Arvados SDKs":/sdk/index.html
+* programmatic access via the "Arvados SDKs":{{ site.baseurl }}/sdk/index.html
 
-In essense, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":/architecture/keep-clients.html.
+In essence, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":{{ site.baseurl }}/architecture/keep-clients.html.
 
 For example, when a request comes in to read a file from Keep, the client will
 * request the collection object (including its manifest) from the API server
@@ -32,7 +32,7 @@ All of those steps are subject to access control, which applies at the level of
 
 h3. API server
 
-The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
+The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":{{ site.baseurl }}/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
 
 h3. Keepstore
 
index f7b7a1641526f8d19d007335dd020c78ebd648ed..5a5d59bc86201c82602bd84b4c6b26f2ce8d117e 100644 (file)
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
 
 h2. Prerequisites
 
index 9ecb2c89562b446166721ca0b9b0c04d5b4091c5..6b292caf3904d5a078ae9d8eaf939d41b4f0c236 100644 (file)
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
 
 h2. Prerequisites
 
index 9169b7810ed94b0b8411a7cc238c4fede72c203e..5ef757d10e689b33c8466a5acf44f7771a7142c3 100644 (file)
@@ -28,5 +28,5 @@ h2. Requirements
 * Minikube or Google Kubernetes Engine (Kubernetes 1.10+ with at least 3 nodes, 2+ cores per node)
 * @kubectl@ and @Helm 3@ installed locally, and able to connect to your Kubernetes cluster
 
-Please refer to "Arvados on Minikube":/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
+Please refer to "Arvados on Minikube":{{ site.baseurl }}/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":{{ site.baseurl }}/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
 
index 876bb6ae5da58e9d928e7b49bcd9bee6d5b949a4..66bd85b7c5038073beaf95d342fde7c2060d90b2 100644 (file)
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
index 09a3b4e3aba38bec42c3e7eb8168eff94fe60f95..8e9db0c4e3d7bcd2a1312f19caeba89ba08da245 100644 (file)
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
index f3a320b10251745f64a8d7eece1e36fb73628e6a..c9a1c7012659fd1a3b629431441dcda69f018a68 100644 (file)
@@ -75,7 +75,7 @@ server {
 
 h2. Vocabulary configuration (optional)
 
-Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections.  To configure the property vocabulary definition, please visit the "Workbench2 Vocabulary Format":{{site.baseurl}}/admin/workbench2-vocabulary.html page in the Admin section.
+Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections.  To configure the property vocabulary definition, please visit the "Metadata Vocabulary Format":{{site.baseurl}}/admin/metadata-vocabulary.html page in the Admin section.
 
 {% assign arvados_component = 'arvados-workbench2' %}
 
index ab36035a846fc05e8204a751672b313239ceb074..e497240c4c20ea27a06d4e522b7d2ab033f102a8 100644 (file)
@@ -11,7 +11,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 # "Introduction":#introduction
 # "Hosts preparation":#hosts_preparation
-## "Hosts setup using terraform (experimental)":#hosts_setup_using_terraform
 ## "Create a compute image":#create_a_compute_image
 # "Multi host install using the provision.sh script":#multi_host
 # "Choose the desired configuration":#choose_configuration
@@ -65,14 +64,6 @@ Note that these hosts can be virtual machines in your infrastructure and they do
 
 Again, if your infrastructure differs from the setup proposed above (ie, using RDS or an existing DB server), remember that you will need to edit the configuration files for the scripts so they work with your infrastructure.
 
-
-h3(#hosts_setup_using_terraform). Hosts setup using terraform (AWS, experimental)
-
-We added a few "terraform":https://terraform.io/ scripts (https://github.com/arvados/arvados/tree/main/tools/terraform) to let you create these instances easier in an AWS account. Check "the Arvados terraform documentation":/doc/install/terraform.html for more details.
-
-
-
-
 h2(#multi_host). Multi host install using the provision.sh script
 
 {% include 'branchname' %}
@@ -112,7 +103,7 @@ The <i>multi_host</i> example includes Let's Encrypt salt code to automatically
 
 h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
 
-You will need further customization to suit your environment, which can be done editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that can be retrieved as output of the terraform run.
+You will need further customization to suit your environment, which can be done by editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that describes your environment.
 
 Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the hosts.
 
index 26be56782de0633f6a485b9b29624cabd60d4bd7..1f7eede4bb14650a862e1276cf1b8bccbc05e429 100644 (file)
@@ -19,7 +19,7 @@ Every line on the file should have 3 values: a group name, a local user identifi
 
 Users can be identified by their email address or username: the tool will check if every user exist on the system, and report back when not found. Groups on the other hand, are identified by their name.
 
-Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":/admin/group-management.html.
+Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":{{ site.baseurl }}/admin/group-management.html.
 
 This tool is designed to be run periodically reading a file created by a remote auth system (ie: LDAP) dump script, applying what's included on the file as the source of truth.
 
index 9a32de0d0b35ba335b9890ce918188ecf987b866..d6a3bb4c10eaaff84e6fb337d1fa32538800654f 100644 (file)
@@ -18,7 +18,7 @@ A version will be saved when one of the following conditions is true:
 
 One is by "configuring (system-wide) the collection's idle time":{{site.baseurl}}/admin/collection-versioning.html. This idle time is checked against the @modified_at@ attribute so that the version is saved when one or more of the previously enumerated attributes get updated and the @modified_at@ is at least at the configured idle time in the past. This way, a frequently updated collection won't create lots of version records that may not be useful.
 
-The other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated.
+The other way to trigger a version save is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated. This includes either creating a new collection or updating a preexisting one. In the case of using @preserve_version = true@ on a collection's create call, the new record state will be preserved as a snapshot on the next update.
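
(Editor's illustration, not part of this commit: a hedged Go sketch of flagging the current collection state for preservation via an update call. The collection UUID and host are placeholders.)

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Setting preserve_version on an update ensures the current state is kept
	// as a past version the next time the collection changes.
	body := bytes.NewBufferString(`{"collection": {"preserve_version": true}}`)
	req, err := http.NewRequest("PUT", "https://zzzzz.example.com/arvados/v1/collections/zzzzz-4zz18-xxxxxxxxxxxxxxx", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("ARVADOS_API_TOKEN"))
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}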
 
 h3. Collection's past versions behavior & limitations
 
index 97ded6bf6863739e23ed53d365be8db1014a5b35..411a79650b3421df477f32e0e96b574d068023ae 100644 (file)
@@ -234,6 +234,12 @@ Clusters:
       # Timeout on requests to internal Keep services.
       KeepServiceRequestTimeout: 15s
 
+      # Vocabulary file path, local to the node running the controller.
+      # This JSON file should contain the description of what's allowed
+      # as objects' metadata. Its format is described at:
+      # https://doc.arvados.org/admin/metadata-vocabulary.html
+      VocabularyPath: ""
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -259,6 +265,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
@@ -1566,7 +1582,6 @@ Clusters:
       DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
 
       # Workbench2 configs
-      VocabularyURL: ""
       FileViewersConfigURL: ""
 
       # Idle time after which the user's session will be auto closed.
index e36d6e76cae40d17c54217e643bf82341e298b87..4c4e341f5a34c8608e554f407aab4869da88a8f6 100644 (file)
@@ -72,6 +72,7 @@ var whitelist = map[string]bool{
        "API.MaxTokenLifetime":                                false,
        "API.RequestTimeout":                                  true,
        "API.SendTimeout":                                     true,
+       "API.VocabularyPath":                                  false,
        "API.WebsocketClientEventQueue":                       false,
        "API.WebsocketServerEventQueue":                       false,
        "AuditLogs":                                           false,
@@ -95,7 +96,7 @@ var whitelist = map[string]bool{
        "Collections.BlobTrashCheckInterval":                  false,
        "Collections.BlobTrashConcurrency":                    false,
        "Collections.BlobTrashLifetime":                       false,
-       "Collections.CollectionVersioning":                    false,
+       "Collections.CollectionVersioning":                    true,
        "Collections.DefaultReplication":                      true,
        "Collections.DefaultTrashLifetime":                    true,
        "Collections.ForwardSlashNameSubstitution":            true,
@@ -215,6 +216,7 @@ var whitelist = map[string]bool{
        "SystemRootToken":                                     false,
        "TLS":                                                 false,
        "Users":                                               true,
+       "Users.ActivatedUsersAreVisibleToOthers":              false,
        "Users.AdminNotifierEmailFrom":                        false,
        "Users.AnonymousUserToken":                            true,
        "Users.AutoAdminFirstUser":                            false,
@@ -276,7 +278,6 @@ var whitelist = map[string]bool{
        "Workbench.UserProfileFormFields.*.*":                 true,
        "Workbench.UserProfileFormFields.*.*.*":               true,
        "Workbench.UserProfileFormMessage":                    true,
-       "Workbench.VocabularyURL":                             true,
        "Workbench.WelcomePageHTML":                           true,
 }
 
index f7849d6142cda6e4353a076a90a561295f7fa034..f8553c3eb758785edb6e023b10e8a9697085e74c 100644 (file)
@@ -240,6 +240,12 @@ Clusters:
       # Timeout on requests to internal Keep services.
       KeepServiceRequestTimeout: 15s
 
+      # Vocabulary file path, local to the node running the controller.
+      # This JSON file should contain the description of what's allowed
+      # as objects' metadata. Its format is described at:
+      # https://doc.arvados.org/admin/metadata-vocabulary.html
+      VocabularyPath: ""
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -265,6 +271,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
@@ -1572,7 +1588,6 @@ Clusters:
       DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
 
       # Workbench2 configs
-      VocabularyURL: ""
       FileViewersConfigURL: ""
 
       # Idle time after which the user's session will be auto closed.
index 144d41c21beb62213195d537d32bca8fa9650f99..cd69727ecb5d2fac27f2777905ad4ba0b5bd4ef7 100644 (file)
@@ -121,8 +121,6 @@ func (h *Handler) setupProxyRemoteCluster(next http.Handler) http.Handler {
 
                mux.ServeHTTP(w, req)
        })
-
-       return mux
 }
 
 type CurrentUser struct {
index aa05cb1e6d58bb954e4573e5c54b4416d6f671d3..d1bf473d76856abd59bfb35f069e4f47f498e680 100644 (file)
@@ -22,6 +22,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/auth"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/health"
 )
 
 type Conn struct {
@@ -30,20 +31,25 @@ type Conn struct {
        remotes map[string]backend
 }
 
-func New(cluster *arvados.Cluster) *Conn {
+func New(cluster *arvados.Cluster, healthFuncs *map[string]health.Func) *Conn {
        local := localdb.NewConn(cluster)
        remotes := map[string]backend{}
        for id, remote := range cluster.RemoteClusters {
                if !remote.Proxy || id == cluster.ClusterID {
                        continue
                }
-               conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(local, id))
+               conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(cluster, local, id))
                // Older versions of controller rely on the Via header
                // to detect loops.
                conn.SendHeader = http.Header{"Via": {"HTTP/1.1 arvados-controller"}}
                remotes[id] = conn
        }
 
+       if healthFuncs != nil {
+               hf := map[string]health.Func{"vocabulary": local.LastVocabularyError}
+               *healthFuncs = hf
+       }
+
        return &Conn{
                cluster: cluster,
                local:   local,
@@ -55,7 +61,7 @@ func New(cluster *arvados.Cluster) *Conn {
 // tokens from an incoming request context, determines whether they
 // should (and can) be salted for the given remoteID, and returns the
 // resulting tokens.
-func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
+func saltedTokenProvider(cluster *arvados.Cluster, local backend, remoteID string) rpc.TokenProvider {
        return func(ctx context.Context) ([]string, error) {
                var tokens []string
                incoming, ok := auth.FromContext(ctx)
@@ -63,6 +69,16 @@ func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
                        return nil, errors.New("no token provided")
                }
                for _, token := range incoming.Tokens {
+                       if strings.HasPrefix(token, "v2/"+cluster.ClusterID+"-") && remoteID == cluster.Login.LoginCluster {
+                               // If we did this, the login cluster
+                               // would call back to us and then
+                               // reject our response because the
+                               // user UUID prefix (i.e., the
+                               // LoginCluster prefix) won't match
+                               // the token UUID prefix (i.e., our
+                               // prefix).
+                               return nil, httpErrorf(http.StatusUnauthorized, "cannot use a locally issued token to forward a request to our login cluster (%s)", remoteID)
+                       }
                        salted, err := auth.SaltToken(token, remoteID)
                        switch err {
                        case nil:
@@ -192,6 +208,10 @@ func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
        return json.RawMessage(buf.Bytes()), err
 }
 
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+       return conn.chooseBackend(conn.cluster.ClusterID).VocabularyGet(ctx)
+}
+
 func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
        if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
                // defer entire login procedure to designated cluster
@@ -465,6 +485,26 @@ func (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptio
        return conn.chooseBackend(options.UUID).GroupUntrash(ctx, options)
 }
 
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+       return conn.chooseBackend(options.ClusterID).LinkCreate(ctx, options)
+}
+
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+       return conn.chooseBackend(options.UUID).LinkUpdate(ctx, options)
+}
+
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+       return conn.chooseBackend(options.UUID).LinkGet(ctx, options)
+}
+
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+       return conn.generated_LinkList(ctx, options)
+}
+
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+       return conn.chooseBackend(options.UUID).LinkDelete(ctx, options)
+}
+
 func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
        return conn.generated_SpecimenList(ctx, options)
 }
index fdc4d96cfaa90b3e28dd2048c2c5bd0f73bf9dc5..5460e938a66348ec2a98f2a478372ea901c4c235 100644 (file)
@@ -70,7 +70,7 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
        ctx = ctrlctx.NewWithTransaction(ctx, s.tx)
        s.ctx = ctx
 
-       s.fed = New(s.cluster)
+       s.fed = New(s.cluster, nil)
 }
 
 func (s *FederationSuite) TearDownTest(c *check.C) {
@@ -93,5 +93,5 @@ func (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend backend)
                Host:   srv.Addr,
                Proxy:  true,
        }
-       s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.fed.local, id))
+       s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.cluster, s.fed.local, id))
 }
index 06a5ce12d792ce000d669f513f9b1908ae1011f8..b49e138ce1f635c3b692a873d646da20ca444173 100644 (file)
@@ -53,7 +53,7 @@ func main() {
                defer out.Close()
                out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
                io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
-               for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User"} {
+               for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User", "Link"} {
                        _, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
                        if err != nil {
                                panic(err)
index 49a2e5b7513f537f0f129fac6c8ba81103e72a29..e8a5a08ff00c10d491e5fb29032fe003fb9b137a 100755 (executable)
@@ -221,3 +221,44 @@ func (conn *Conn) generated_UserList(ctx context.Context, options arvados.ListOp
        }
        return merged, err
 }
+
+func (conn *Conn) generated_LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+       var mtx sync.Mutex
+       var merged arvados.LinkList
+       var needSort atomic.Value
+       needSort.Store(false)
+       err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+               options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+               cl, err := backend.LinkList(ctx, options)
+               if err != nil {
+                       return nil, err
+               }
+               mtx.Lock()
+               defer mtx.Unlock()
+               if len(merged.Items) == 0 {
+                       merged = cl
+               } else if len(cl.Items) > 0 {
+                       merged.Items = append(merged.Items, cl.Items...)
+                       needSort.Store(true)
+               }
+               uuids := make([]string, 0, len(cl.Items))
+               for _, item := range cl.Items {
+                       uuids = append(uuids, item.UUID)
+               }
+               return uuids, nil
+       })
+       if needSort.Load().(bool) {
+               // Apply the default/implied order, "modified_at desc"
+               sort.Slice(merged.Items, func(i, j int) bool {
+                       mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+                       return mj.Before(mi)
+               })
+       }
+       if merged.Items == nil {
+               // Return empty results as [], not null
+               // (https://github.com/golang/go/issues/27589 might be
+               // a better solution in the future)
+               merged.Items = []arvados.Link{}
+       }
+       return merged, err
+}
index 5353ebf0f52ce7394b868838eb75e730945b4d8e..c05ebfce69820b3be781a3d18be8a591aaa94eb2 100644 (file)
@@ -47,7 +47,7 @@ func (s *LoginSuite) TestLogout(c *check.C) {
        s.cluster.Login.LoginCluster = "zhome"
        // s.fed is already set by SetUpTest, but we need to
        // reinitialize with the above config changes.
-       s.fed = New(s.cluster)
+       s.fed = New(s.cluster, nil)
 
        returnTo := "https://app.example.com/foo?bar"
        for _, trial := range []struct {
index 2812c1f41d5c6cd7aa3f02da5b6a06f051c6283a..064f8ce5d09e8e931f0769a23970ee676ba4e6ca 100644 (file)
@@ -30,7 +30,7 @@ type UserSuite struct {
 func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
        s.cluster.ClusterID = "local"
        s.cluster.Login.LoginCluster = "zzzzz"
-       s.fed = New(s.cluster)
+       s.fed = New(s.cluster, nil)
        s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
 
        for _, updateFail := range []bool{false, true} {
@@ -120,7 +120,7 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
 func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
        s.cluster.ClusterID = "local"
        s.cluster.Login.LoginCluster = "zzzzz"
-       s.fed = New(s.cluster)
+       s.fed = New(s.cluster, nil)
        s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
 
        opts := arvados.GetOptions{UUID: "zzzzz-tpzed-xurymjxw79nv3jz", Select: []string{"uuid", "email"}}
@@ -174,7 +174,7 @@ func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
 func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
        s.cluster.ClusterID = "local"
        s.cluster.Login.LoginCluster = "zzzzz"
-       s.fed = New(s.cluster)
+       s.fed = New(s.cluster, nil)
        s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
                true, rpc.PassthroughTokenProvider))
 
index a35d0030194e8bf9e79d1f2f256ff9fab5621fe7..b51d909110827bf7d8470120a87f5e29db008a15 100644 (file)
@@ -9,6 +9,7 @@ import (
        "errors"
        "fmt"
        "net/http"
+       "net/http/httptest"
        "net/url"
        "strings"
        "sync"
@@ -74,7 +75,21 @@ func (h *Handler) CheckHealth() error {
                return err
        }
        _, _, err = railsproxy.FindRailsAPI(h.Cluster)
-       return err
+       if err != nil {
+               return err
+       }
+       if h.Cluster.API.VocabularyPath != "" {
+               req, err := http.NewRequest("GET", "/arvados/v1/vocabulary", nil)
+               if err != nil {
+                       return err
+               }
+               var resp httptest.ResponseRecorder
+               h.handlerStack.ServeHTTP(&resp, req)
+               if resp.Result().StatusCode != http.StatusOK {
+                       return fmt.Errorf("%d %s", resp.Result().StatusCode, resp.Result().Status)
+               }
+       }
+       return nil
 }
 
 func (h *Handler) Done() <-chan struct{} {
@@ -85,18 +100,25 @@ func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLas
 
 func (h *Handler) setup() {
        mux := http.NewServeMux()
-       mux.Handle("/_health/", &health.Handler{
-               Token:  h.Cluster.ManagementToken,
-               Prefix: "/_health/",
-               Routes: health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }},
-       })
+       healthFuncs := make(map[string]health.Func)
 
        oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
-       rtr := router.New(federation.New(h.Cluster), router.Config{
+       rtr := router.New(federation.New(h.Cluster, &healthFuncs), router.Config{
                MaxRequestSize: h.Cluster.API.MaxRequestSize,
                WrapCalls:      api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
        })
+
+       healthRoutes := health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }}
+       for name, f := range healthFuncs {
+               healthRoutes[name] = f
+       }
+       mux.Handle("/_health/", &health.Handler{
+               Token:  h.Cluster.ManagementToken,
+               Prefix: "/_health/",
+               Routes: healthRoutes,
+       })
        mux.Handle("/arvados/v1/config", rtr)
+       mux.Handle("/arvados/v1/vocabulary", rtr)
        mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr) // must come before .../users/
        mux.Handle("/arvados/v1/collections", rtr)
        mux.Handle("/arvados/v1/collections/", rtr)
@@ -107,6 +129,8 @@ func (h *Handler) setup() {
        mux.Handle("/arvados/v1/container_requests/", rtr)
        mux.Handle("/arvados/v1/groups", rtr)
        mux.Handle("/arvados/v1/groups/", rtr)
+       mux.Handle("/arvados/v1/links", rtr)
+       mux.Handle("/arvados/v1/links/", rtr)
        mux.Handle("/login", rtr)
        mux.Handle("/logout", rtr)
 
index 9b71c349a4b5624cf32cdf3eb6bba83d06d737bc..f854079f97d87376c9d6e3813b10b2872701d0f5 100644 (file)
@@ -88,6 +88,104 @@ func (s *HandlerSuite) TestConfigExport(c *check.C) {
        }
 }
 
+func (s *HandlerSuite) TestVocabularyExport(c *check.C) {
+       voc := `{
+               "strict_tags": false,
+               "tags": {
+                       "IDTAGIMPORTANCE": {
+                               "strict": false,
+                               "labels": [{"label": "Importance"}],
+                               "values": {
+                                       "HIGH": {
+                                               "labels": [{"label": "High"}]
+                                       },
+                                       "LOW": {
+                                               "labels": [{"label": "Low"}]
+                                       }
+                               }
+                       }
+               }
+       }`
+       f, err := os.CreateTemp("", "test-vocabulary-*.json")
+       c.Assert(err, check.IsNil)
+       defer os.Remove(f.Name())
+       _, err = f.WriteString(voc)
+       c.Assert(err, check.IsNil)
+       f.Close()
+       s.cluster.API.VocabularyPath = f.Name()
+       for _, method := range []string{"GET", "OPTIONS"} {
+               c.Log(c.TestName()+" ", method)
+               req := httptest.NewRequest(method, "/arvados/v1/vocabulary", nil)
+               resp := httptest.NewRecorder()
+               s.handler.ServeHTTP(resp, req)
+               c.Log(resp.Body.String())
+               if !c.Check(resp.Code, check.Equals, http.StatusOK) {
+                       continue
+               }
+               c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, `*`)
+               c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Matches, `.*\bGET\b.*`)
+               c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Matches, `.+`)
+               if method == "OPTIONS" {
+                       c.Check(resp.Body.String(), check.HasLen, 0)
+                       continue
+               }
+               var expectedVoc, receivedVoc *arvados.Vocabulary
+               err := json.Unmarshal([]byte(voc), &expectedVoc)
+               c.Check(err, check.IsNil)
+               err = json.Unmarshal(resp.Body.Bytes(), &receivedVoc)
+               c.Check(err, check.IsNil)
+               c.Check(receivedVoc, check.DeepEquals, expectedVoc)
+       }
+}
+
+func (s *HandlerSuite) TestVocabularyFailedCheckStatus(c *check.C) {
+       voc := `{
+               "strict_tags": false,
+               "tags": {
+                       "IDTAGIMPORTANCE": {
+                               "strict": true,
+                               "labels": [{"label": "Importance"}],
+                               "values": {
+                                       "HIGH": {
+                                               "labels": [{"label": "High"}]
+                                       },
+                                       "LOW": {
+                                               "labels": [{"label": "Low"}]
+                                       }
+                               }
+                       }
+               }
+       }`
+       f, err := os.CreateTemp("", "test-vocabulary-*.json")
+       c.Assert(err, check.IsNil)
+       defer os.Remove(f.Name())
+       _, err = f.WriteString(voc)
+       c.Assert(err, check.IsNil)
+       f.Close()
+       s.cluster.API.VocabularyPath = f.Name()
+
+       req := httptest.NewRequest("POST", "/arvados/v1/collections",
+               strings.NewReader(`{
+                       "collection": {
+                               "properties": {
+                                       "IDTAGIMPORTANCE": "Critical"
+                               }
+                       }
+               }`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       req.Header.Set("Content-type", "application/json")
+
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Log(resp.Body.String())
+       c.Assert(resp.Code, check.Equals, http.StatusBadRequest)
+       var jresp httpserver.ErrorResponse
+       err = json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Assert(len(jresp.Errors), check.Equals, 1)
+       c.Check(jresp.Errors[0], check.Matches, `.*tag value.*is not valid for key.*`)
+}
+
 func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
        req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
        resp := httptest.NewRecorder()
@@ -245,7 +343,7 @@ func (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, ski
        resp := httptest.NewRecorder()
        s.handler.ServeHTTP(resp, req)
        c.Assert(resp.Code, check.Equals, http.StatusOK,
-               check.Commentf("Wasn't able to get data from the controller at %q", url))
+               check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
        err = json.Unmarshal(resp.Body.Bytes(), &proxied)
        c.Check(err, check.Equals, nil)
 
index 02061547bf5b826b1618ac37298a876cd205b30b..4cf6a683287ae211f58f13cdb664d087f2ceff00 100644 (file)
@@ -959,3 +959,63 @@ func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
                }
        }
 }
+
+// z3333 should not forward a locally-issued container runtime token,
+// associated with a z1111 user, to its login cluster z1111. z1111
+// would only call back to z3333 and then reject the response because
+// the user ID does not match the token prefix. See
+// dev.arvados.org/issues/18346
+func (s *IntegrationSuite) TestForwardRuntimeTokenToLoginCluster(c *check.C) {
+       db3, db3conn := s.dbConn(c, "z3333")
+       defer db3.Close()
+       defer db3conn.Close()
+       rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+       rootctx3, _, _ := s.testClusters["z3333"].RootClients()
+       conn1 := s.testClusters["z1111"].Conn()
+       conn3 := s.testClusters["z3333"].Conn()
+       userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+       user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
+       c.Assert(err, check.IsNil)
+       c.Logf("user1 %+v", user1)
+
+       imageColl, err := conn3.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+               "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar\n",
+       }})
+       c.Assert(err, check.IsNil)
+       c.Logf("imageColl %+v", imageColl)
+
+       cr, err := conn3.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+               "state":           "Committed",
+               "command":         []string{"echo"},
+               "container_image": imageColl.PortableDataHash,
+               "cwd":             "/",
+               "output_path":     "/",
+               "priority":        1,
+               "runtime_constraints": arvados.RuntimeConstraints{
+                       VCPUs: 1,
+                       RAM:   1000000000,
+               },
+       }})
+       c.Assert(err, check.IsNil)
+       c.Logf("container request %+v", cr)
+       ctr, err := conn3.ContainerLock(rootctx3, arvados.GetOptions{UUID: cr.ContainerUUID})
+       c.Assert(err, check.IsNil)
+       c.Logf("container %+v", ctr)
+
+       // We could use conn3.ContainerAuth() here, but that API
+       // hasn't been added to sdk/go/arvados/api.go yet.
+       row := db3conn.QueryRowContext(context.Background(), `SELECT api_token from api_client_authorizations where uuid=$1`, ctr.AuthUUID)
+       c.Check(row, check.NotNil)
+       var val sql.NullString
+       row.Scan(&val)
+       c.Assert(val.Valid, check.Equals, true)
+       runtimeToken := "v2/" + ctr.AuthUUID + "/" + val.String
+       ctrctx, _, _ := s.testClusters["z3333"].ClientsWithToken(runtimeToken)
+       c.Logf("container runtime token %+v", runtimeToken)
+
+       _, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})
+       c.Assert(err, check.NotNil)
+       c.Check(err, check.ErrorMatches, `request failed: .* 401 Unauthorized: cannot use a locally issued token to forward a request to our login cluster \(z1111\)`)
+       c.Check(err, check.Not(check.ErrorMatches), `(?ms).*127\.0\.0\.11.*`)
+}
index d81dd812bfe2ca575fa44895dac2fadd23fc6a72..96c89252ec0285e58dac4330333070c9898cce9e 100644 (file)
@@ -49,8 +49,12 @@ func (conn *Conn) CollectionList(ctx context.Context, opts arvados.ListOptions)
 }
 
 // CollectionCreate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
 func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Collection, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Collection{}, err
+       }
        if len(opts.Select) > 0 {
                // We need to know IsTrashed and TrashAt to implement
                // signing properly, even if the caller doesn't want
@@ -66,8 +70,12 @@ func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptio
 }
 
 // CollectionUpdate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
 func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Collection, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Collection{}, err
+       }
        if len(opts.Select) > 0 {
                // We need to know IsTrashed and TrashAt to implement
                // signing properly, even if the caller doesn't want
index 4a44949641da71b70985f2a5ce472e94327b81ec..bbfb811165c7c869cfc62e6306611d9e60c6f457 100644 (file)
@@ -48,6 +48,93 @@ func (s *CollectionSuite) TearDownTest(c *check.C) {
        s.railsSpy.Close()
 }
 
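+// setUpVocabulary loads the given JSON (or a default test vocabulary)
+// directly into the localdb cache, and sets a dummy VocabularyPath so that
+// property checking is enabled without reading a file from disk.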
+func (s *CollectionSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+       if testVocabulary == "" {
+               testVocabulary = `{
+                       "strict_tags": false,
+                       "tags": {
+                               "IDTAGIMPORTANCES": {
+                                       "strict": true,
+                                       "labels": [{"label": "Importance"}, {"label": "Priority"}],
+                                       "values": {
+                                               "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+                                               "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+                                               "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+                                       }
+                               }
+                       }
+               }`
+       }
+       voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+       c.Assert(err, check.IsNil)
+       s.cluster.API.VocabularyPath = "foo"
+       s.localdb.vocabularyCache = voc
+}
+
+func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+
+               coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(coll.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
+
+func (s *CollectionSuite) TestCollectionUpdateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{})
+               c.Assert(err, check.IsNil)
+               coll, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+                       UUID:   coll.UUID,
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(coll.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
+
 func (s *CollectionSuite) TestSignatures(c *check.C) {
        ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
 
index a90deded593ab59c31599e6bcde3ca833a961349..323e660c6f1e75d79721466e513dc8c611a52833 100644 (file)
@@ -6,27 +6,37 @@ package localdb
 
 import (
        "context"
+       "encoding/json"
        "fmt"
+       "net/http"
+       "os"
        "strings"
+       "time"
 
        "git.arvados.org/arvados.git/lib/controller/railsproxy"
        "git.arvados.org/arvados.git/lib/controller/rpc"
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/sirupsen/logrus"
 )
 
 type railsProxy = rpc.Conn
 
 type Conn struct {
-       cluster     *arvados.Cluster
-       *railsProxy // handles API methods that aren't defined on Conn itself
+       cluster                    *arvados.Cluster
+       *railsProxy                // handles API methods that aren't defined on Conn itself
+       vocabularyCache            *arvados.Vocabulary
+       vocabularyFileModTime      time.Time
+       lastVocabularyRefreshCheck time.Time
+       lastVocabularyError        error
        loginController
 }
 
 func NewConn(cluster *arvados.Cluster) *Conn {
        railsProxy := railsproxy.NewConn(cluster)
        railsProxy.RedactHostInErrors = true
-       var conn Conn
-       conn = Conn{
+       conn := Conn{
                cluster:    cluster,
                railsProxy: railsProxy,
        }
@@ -34,6 +44,106 @@ func NewConn(cluster *arvados.Cluster) *Conn {
        return &conn
 }
 
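+// checkProperties validates the given properties against the cluster's
+// metadata vocabulary. The properties argument may be a JSON-encoded
+// string or an already-decoded map.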
+func (conn *Conn) checkProperties(ctx context.Context, properties interface{}) error {
+       if properties == nil {
+               return nil
+       }
+       var props map[string]interface{}
+       switch properties := properties.(type) {
+       case string:
+               err := json.Unmarshal([]byte(properties), &props)
+               if err != nil {
+                       return err
+               }
+       case map[string]interface{}:
+               props = properties
+       default:
+               return fmt.Errorf("unexpected properties type %T", properties)
+       }
+       voc, err := conn.VocabularyGet(ctx)
+       if err != nil {
+               return err
+       }
+       err = voc.Check(props)
+       if err != nil {
+               return httpErrorf(http.StatusBadRequest, "%v", err)
+       }
+       return nil
+}
+
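+// maybeRefreshVocabularyCache reloads the vocabulary file if it has been
+// modified since the last load, checking the file at most once per second.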
+func (conn *Conn) maybeRefreshVocabularyCache(logger logrus.FieldLogger) error {
+       if conn.lastVocabularyRefreshCheck.Add(time.Second).After(time.Now()) {
+               // Throttle the access to disk to at most once per second.
+               return nil
+       }
+       conn.lastVocabularyRefreshCheck = time.Now()
+       fi, err := os.Stat(conn.cluster.API.VocabularyPath)
+       if err != nil {
+               err = fmt.Errorf("couldn't stat vocabulary file %q: %v", conn.cluster.API.VocabularyPath, err)
+               conn.lastVocabularyError = err
+               return err
+       }
+       if fi.ModTime().After(conn.vocabularyFileModTime) {
+               err = conn.loadVocabularyFile()
+               if err != nil {
+                       conn.lastVocabularyError = err
+                       return err
+               }
+               conn.vocabularyFileModTime = fi.ModTime()
+               conn.lastVocabularyError = nil
+               logger.Info("vocabulary file reloaded successfully")
+       }
+       return nil
+}
+
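+// loadVocabularyFile reads the configured vocabulary file, passing the
+// cluster's managed property keys to the parser, and caches the result.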
+func (conn *Conn) loadVocabularyFile() error {
+       vf, err := os.ReadFile(conn.cluster.API.VocabularyPath)
+       if err != nil {
+               return fmt.Errorf("couldn't read the vocabulary file: %v", err)
+       }
+       mk := make([]string, 0, len(conn.cluster.Collections.ManagedProperties))
+       for k := range conn.cluster.Collections.ManagedProperties {
+               mk = append(mk, k)
+       }
+       voc, err := arvados.NewVocabulary(vf, mk)
+       if err != nil {
+               return fmt.Errorf("while loading vocabulary file %q: %s", conn.cluster.API.VocabularyPath, err)
+       }
+       conn.vocabularyCache = voc
+       return nil
+}
+
+// LastVocabularyError returns the last error encountered while loading the
+// vocabulary file.
+// Implements health.Func
+func (conn *Conn) LastVocabularyError() error {
+       conn.maybeRefreshVocabularyCache(ctxlog.FromContext(context.Background()))
+       return conn.lastVocabularyError
+}
+
+// VocabularyGet refreshes the vocabulary cache if necessary and returns it.
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+       if conn.cluster.API.VocabularyPath == "" {
+               return arvados.Vocabulary{
+                       Tags: map[string]arvados.VocabularyTag{},
+               }, nil
+       }
+       logger := ctxlog.FromContext(ctx)
+       if conn.vocabularyCache == nil {
+               // Initial load of vocabulary file.
+               err := conn.loadVocabularyFile()
+               if err != nil {
+                       logger.WithError(err).Error("error loading vocabulary file")
+                       return arvados.Vocabulary{}, err
+               }
+       }
+       err := conn.maybeRefreshVocabularyCache(logger)
+       if err != nil {
+               logger.WithError(err).Error("error reloading vocabulary file - ignoring")
+       }
+       return *conn.vocabularyCache, nil
+}
+
 // Logout handles the logout of conn giving to the appropriate loginController
 func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
        return conn.loginController.Logout(ctx, opts)
@@ -96,3 +206,7 @@ func (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupConten
 
        return conn.railsProxy.GroupContents(ctx, options)
 }
+
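+// httpErrorf returns an error carrying the given HTTP status code, with a
+// message built from the given format specifier and arguments.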
+func httpErrorf(code int, format string, args ...interface{}) error {
+       return httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)
+}
diff --git a/lib/controller/localdb/container_request.go b/lib/controller/localdb/container_request.go
new file mode 100644 (file)
index 0000000..5b2ce95
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// ContainerRequestCreate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.ContainerRequest, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.ContainerRequest{}, err
+       }
+       resp, err := conn.railsProxy.ContainerRequestCreate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
+
+// ContainerRequestUpdate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.ContainerRequest{}, err
+       }
+       resp, err := conn.railsProxy.ContainerRequestUpdate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
diff --git a/lib/controller/localdb/container_request_test.go b/lib/controller/localdb/container_request_test.go
new file mode 100644 (file)
index 0000000..cca541a
--- /dev/null
@@ -0,0 +1,166 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ContainerRequestSuite{})
+
+type ContainerRequestSuite struct {
+       cluster  *arvados.Cluster
+       localdb  *Conn
+       railsSpy *arvadostest.Proxy
+}
+
+func (s *ContainerRequestSuite) TearDownSuite(c *check.C) {
+       // Undo any changes/additions to the user database so they
+       // don't affect subsequent tests.
+       arvadostest.ResetEnv()
+       c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *ContainerRequestSuite) SetUpTest(c *check.C) {
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.IsNil)
+       s.cluster, err = cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       s.localdb = NewConn(s.cluster)
+       s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *ContainerRequestSuite) TearDownTest(c *check.C) {
+       s.railsSpy.Close()
+}
+
+func (s *ContainerRequestSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+       if testVocabulary == "" {
+               testVocabulary = `{
+                       "strict_tags": false,
+                       "tags": {
+                               "IDTAGIMPORTANCES": {
+                                       "strict": true,
+                                       "labels": [{"label": "Importance"}, {"label": "Priority"}],
+                                       "values": {
+                                               "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+                                               "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+                                               "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+                                       }
+                               }
+                       }
+               }`
+       }
+       voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+       c.Assert(err, check.IsNil)
+       s.localdb.vocabularyCache = voc
+       s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+
+               cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "command":         []string{"echo", "foo"},
+                               "container_image": "arvados/apitestfixture:latest",
+                               "cwd":             "/tmp",
+                               "environment":     map[string]string{},
+                               "mounts": map[string]interface{}{
+                                       "/out": map[string]interface{}{
+                                               "kind":     "tmp",
+                                               "capacity": 1000000,
+                                       },
+                               },
+                               "output_path": "/out",
+                               "runtime_constraints": map[string]interface{}{
+                                       "vcpus": 1,
+                                       "ram":   2,
+                               },
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
+
+func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+                       Attrs: map[string]interface{}{
+                               "command":         []string{"echo", "foo"},
+                               "container_image": "arvados/apitestfixture:latest",
+                               "cwd":             "/tmp",
+                               "environment":     map[string]string{},
+                               "mounts": map[string]interface{}{
+                                       "/out": map[string]interface{}{
+                                               "kind":     "tmp",
+                                               "capacity": 1000000,
+                                       },
+                               },
+                               "output_path": "/out",
+                               "runtime_constraints": map[string]interface{}{
+                                       "vcpus": 1,
+                                       "ram":   2,
+                               },
+                       },
+               })
+               c.Assert(err, check.IsNil)
+               cnt, err = s.localdb.ContainerRequestUpdate(ctx, arvados.UpdateOptions{
+                       UUID:   cnt.UUID,
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
diff --git a/lib/controller/localdb/group.go b/lib/controller/localdb/group.go
new file mode 100644 (file)
index 0000000..0d77bdb
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// GroupCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Group, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Group{}, err
+       }
+       resp, err := conn.railsProxy.GroupCreate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
+
+// GroupUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Group, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Group{}, err
+       }
+       resp, err := conn.railsProxy.GroupUpdate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
diff --git a/lib/controller/localdb/group_test.go b/lib/controller/localdb/group_test.go
new file mode 100644 (file)
index 0000000..2d55def
--- /dev/null
@@ -0,0 +1,138 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GroupSuite{})
+
+type GroupSuite struct {
+       cluster  *arvados.Cluster
+       localdb  *Conn
+       railsSpy *arvadostest.Proxy
+}
+
+func (s *GroupSuite) TearDownSuite(c *check.C) {
+       // Undo any changes/additions to the user database so they
+       // don't affect subsequent tests.
+       arvadostest.ResetEnv()
+       c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *GroupSuite) SetUpTest(c *check.C) {
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.IsNil)
+       s.cluster, err = cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       s.localdb = NewConn(s.cluster)
+       s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *GroupSuite) TearDownTest(c *check.C) {
+       s.railsSpy.Close()
+}
+
+func (s *GroupSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+       if testVocabulary == "" {
+               testVocabulary = `{
+                       "strict_tags": false,
+                       "tags": {
+                               "IDTAGIMPORTANCES": {
+                                       "strict": true,
+                                       "labels": [{"label": "Importance"}, {"label": "Priority"}],
+                                       "values": {
+                                               "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+                                               "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+                                               "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+                                       }
+                               }
+                       }
+               }`
+       }
+       voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+       c.Assert(err, check.IsNil)
+       s.localdb.vocabularyCache = voc
+       s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+
+               grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "group_class": "project",
+                               "properties":  tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(grp.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
+
+func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+                       Attrs: map[string]interface{}{
+                               "group_class": "project",
+                       },
+               })
+               c.Assert(err, check.IsNil)
+               grp, err = s.localdb.GroupUpdate(ctx, arvados.UpdateOptions{
+                       UUID:   grp.UUID,
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(grp.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
diff --git a/lib/controller/localdb/link.go b/lib/controller/localdb/link.go
new file mode 100644 (file)
index 0000000..cfcae3d
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// LinkCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Link, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Link{}, err
+       }
+       resp, err := conn.railsProxy.LinkCreate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
+
+// LinkUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Link, error) {
+       err := conn.checkProperties(ctx, opts.Attrs["properties"])
+       if err != nil {
+               return arvados.Link{}, err
+       }
+       resp, err := conn.railsProxy.LinkUpdate(ctx, opts)
+       if err != nil {
+               return resp, err
+       }
+       return resp, nil
+}
diff --git a/lib/controller/localdb/link_test.go b/lib/controller/localdb/link_test.go
new file mode 100644 (file)
index 0000000..2f07fb4
--- /dev/null
@@ -0,0 +1,142 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LinkSuite{})
+
+type LinkSuite struct {
+       cluster  *arvados.Cluster
+       localdb  *Conn
+       railsSpy *arvadostest.Proxy
+}
+
+func (s *LinkSuite) TearDownSuite(c *check.C) {
+       // Undo any changes/additions to the user database so they
+       // don't affect subsequent tests.
+       arvadostest.ResetEnv()
+       c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *LinkSuite) SetUpTest(c *check.C) {
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.IsNil)
+       s.cluster, err = cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       s.localdb = NewConn(s.cluster)
+       s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *LinkSuite) TearDownTest(c *check.C) {
+       s.railsSpy.Close()
+}
+
+func (s *LinkSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+       if testVocabulary == "" {
+               testVocabulary = `{
+                       "strict_tags": false,
+                       "tags": {
+                               "IDTAGIMPORTANCES": {
+                                       "strict": true,
+                                       "labels": [{"label": "Importance"}, {"label": "Priority"}],
+                                       "values": {
+                                               "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+                                               "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+                                               "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+                                       }
+                               }
+                       }
+               }`
+       }
+       voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+       c.Assert(err, check.IsNil)
+       s.localdb.vocabularyCache = voc
+       s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+
+               lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "link_class": "star",
+                               "tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+                               "head_uuid":  arvadostest.FooCollection,
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
+
+func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
+       s.setUpVocabulary(c, "")
+       ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+       tests := []struct {
+               name    string
+               props   map[string]interface{}
+               success bool
+       }{
+               {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+               {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+               {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+               {"Empty properties", map[string]interface{}{}, true},
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+                       Attrs: map[string]interface{}{
+                               "link_class": "star",
+                               "tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+                               "head_uuid":  arvadostest.FooCollection,
+                       },
+               })
+               c.Assert(err, check.IsNil)
+               lnk, err = s.localdb.LinkUpdate(ctx, arvados.UpdateOptions{
+                       UUID:   lnk.UUID,
+                       Select: []string{"uuid", "properties"},
+                       Attrs: map[string]interface{}{
+                               "properties": tt.props,
+                       }})
+               if tt.success {
+                       c.Assert(err, check.IsNil)
+                       c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+               } else {
+                       c.Assert(err, check.NotNil)
+               }
+       }
+}
index 03cdcf18d27e4fcf3df814ab3c652c3479456165..01126bcb49a130440ec56bae76dbb78590dc9a3b 100644 (file)
@@ -26,6 +26,10 @@ type responseOptions struct {
 func (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {
        var rOpts responseOptions
        switch opts := opts.(type) {
+       case *arvados.CreateOptions:
+               rOpts.Select = opts.Select
+       case *arvados.UpdateOptions:
+               rOpts.Select = opts.Select
        case *arvados.GetOptions:
                rOpts.Select = opts.Select
        case *arvados.ListOptions:
index 9826c1e7448e548bc41c6f14dd092bacd2046742..02e06279f1168adca61999a543a9e82ad059e424 100644 (file)
@@ -65,6 +65,13 @@ func (rtr *router) addRoutes() {
                                return rtr.backend.ConfigGet(ctx)
                        },
                },
+               {
+                       arvados.EndpointVocabularyGet,
+                       func() interface{} { return &struct{}{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.VocabularyGet(ctx)
+                       },
+               },
                {
                        arvados.EndpointLogin,
                        func() interface{} { return &arvados.LoginOptions{} },
@@ -307,6 +314,41 @@ func (rtr *router) addRoutes() {
                                return rtr.backend.GroupUntrash(ctx, *opts.(*arvados.UntrashOptions))
                        },
                },
+               {
+                       arvados.EndpointLinkCreate,
+                       func() interface{} { return &arvados.CreateOptions{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.LinkCreate(ctx, *opts.(*arvados.CreateOptions))
+                       },
+               },
+               {
+                       arvados.EndpointLinkUpdate,
+                       func() interface{} { return &arvados.UpdateOptions{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.LinkUpdate(ctx, *opts.(*arvados.UpdateOptions))
+                       },
+               },
+               {
+                       arvados.EndpointLinkList,
+                       func() interface{} { return &arvados.ListOptions{Limit: -1} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.LinkList(ctx, *opts.(*arvados.ListOptions))
+                       },
+               },
+               {
+                       arvados.EndpointLinkGet,
+                       func() interface{} { return &arvados.GetOptions{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.LinkGet(ctx, *opts.(*arvados.GetOptions))
+                       },
+               },
+               {
+                       arvados.EndpointLinkDelete,
+                       func() interface{} { return &arvados.DeleteOptions{} },
+                       func(ctx context.Context, opts interface{}) (interface{}, error) {
+                               return rtr.backend.LinkDelete(ctx, *opts.(*arvados.DeleteOptions))
+                       },
+               },
                {
                        arvados.EndpointSpecimenCreate,
                        func() interface{} { return &arvados.CreateOptions{} },
index 7228956453d1d0c0f6dd460ef7638f19db76a459..ce440dac574f25a14f01fd79425b3e241c6d83fc 100644 (file)
@@ -379,6 +379,7 @@ func (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {
 func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
        uuid := arvadostest.QueuedContainerUUID
        token := arvadostest.ActiveTokenV2
+       // GET
        for _, sel := range [][]string{
                {"uuid", "command"},
                {"uuid", "command", "uuid"},
@@ -395,6 +396,26 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
                _, hasMounts := resp["mounts"]
                c.Check(hasMounts, check.Equals, false)
        }
+       // POST & PUT
+       uuid = arvadostest.FooCollection
+       j, err := json.Marshal([]string{"uuid", "description"})
+       c.Assert(err, check.IsNil)
+       for _, method := range []string{"PUT", "POST"} {
+               desc := "Today is " + time.Now().String()
+               reqBody := "{\"description\":\"" + desc + "\"}"
+               var resp map[string]interface{}
+               var rr *httptest.ResponseRecorder
+               if method == "PUT" {
+                       _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+               } else {
+                       _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+               }
+               c.Check(rr.Code, check.Equals, http.StatusOK)
+               c.Check(resp["kind"], check.Equals, "arvados#collection")
+               c.Check(resp["uuid"], check.HasLen, 27)
+               c.Check(resp["description"], check.Equals, desc)
+               c.Check(resp["manifest_text"], check.IsNil)
+       }
 }
 
 func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {
index 640bbf1c23b837822485bc77b1326791f628c03d..25f47bc3bac4f801f2aa33b90e2ab935b0f651f9 100644 (file)
@@ -178,6 +178,13 @@ func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
        return resp, err
 }
 
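+// VocabularyGet retrieves the metadata vocabulary definition from the
+// remote API server.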
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+       ep := arvados.EndpointVocabularyGet
+       var resp arvados.Vocabulary
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, nil)
+       return resp, err
+}
+
 func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
        ep := arvados.EndpointLogin
        var resp arvados.LoginResponse
@@ -495,6 +502,41 @@ func (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptio
        return resp, err
 }
 
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+       ep := arvados.EndpointLinkCreate
+       var resp arvados.Link
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
+
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+       ep := arvados.EndpointLinkUpdate
+       var resp arvados.Link
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
+
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+       ep := arvados.EndpointLinkGet
+       var resp arvados.Link
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
+
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+       ep := arvados.EndpointLinkList
+       var resp arvados.LinkList
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
+
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+       ep := arvados.EndpointLinkDelete
+       var resp arvados.Link
+       err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+       return resp, err
+}
+
 func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
        ep := arvados.EndpointSpecimenCreate
        var resp arvados.Specimen
index f9bf3df9d7535fceb20231b8082c359745dc45e2..63a0ada54ee733629b8bd44b7e3bff6df5f7b793 100644 (file)
@@ -617,10 +617,15 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
        }
 
        if pdhOnly {
-               arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
+               // If we are only mounting collections by PDH, make
+               // sure we don't subscribe to websocket events to
+               // avoid putting undesired load on the API server.
+               arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
        } else {
                arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
        }
+       // The by_uuid mount point is used by Singularity when writing
+       // out Docker images converted to SIF.
        arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
        arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
 
index 4c5f517b1139dbb16b9b7412b3686c67e41ac33d..c28cf73cbe0cfc694f81464eae65350c703238f8 100644 (file)
@@ -1126,7 +1126,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1146,7 +1146,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1166,7 +1166,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1189,7 +1189,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1212,7 +1212,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
                        "/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1239,7 +1239,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
                        "/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1322,7 +1322,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/tmp":     {realTemp + "/tmp2", false},
                        "/tmp/foo": {realTemp + "/keep1/tmp0", true},
index ca4b6814752970a9cc1aad7d2bba265f9d887af4..1d3cc09275feb688929d9ab01ca4dd73de42aa5d 100644 (file)
@@ -29,6 +29,7 @@ import (
 var Command cmd.Handler = &installCommand{}
 
 const devtestDatabasePassword = "insecure_arvados_test"
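+// goversion is the Go version installed by "arvados-server install"; it is
+// also extracted from this file by tools/arvbox/bin/arvbox when
+// bootstrapping arvados-server (see deps_go_version_test.go).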
+const goversion = "1.17.1"
 
 type installCommand struct {
        ClusterType    string
@@ -239,7 +240,6 @@ make install
        }
 
        if !prod {
-               goversion := "1.17.1"
                if havegoversion, err := exec.Command("/usr/local/bin/go", "version").CombinedOutput(); err == nil && bytes.HasPrefix(havegoversion, []byte("go version go"+goversion+" ")) {
                        logger.Print("go " + goversion + " already installed")
                } else {
diff --git a/lib/install/deps_go_version_test.go b/lib/install/deps_go_version_test.go
new file mode 100644 (file)
index 0000000..1a69b6e
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package install
+
+import (
+       "bytes"
+       "os/exec"
+       "testing"
+
+       "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+/*
+       TestExtractGoVersion tests the grep/awk command used in
+       tools/arvbox/bin/arvbox to extract the version of Go to install for
+       bootstrapping `arvados-server`.
+
+       If this test is changed, the arvbox code will also need to be updated.
+*/
+func (*Suite) TestExtractGoVersion(c *check.C) {
+       script := `
+  sourcepath="$(realpath ../..)"
+  (cd ${sourcepath} && grep 'const goversion =' lib/install/deps.go |awk -F'"' '{print $2}')
+       `
+       cmd := exec.Command("bash", "-")
+       cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
+       cmdOutput, err := cmd.Output()
+       c.Assert(err, check.IsNil)
+       c.Assert(string(cmdOutput), check.Equals, goversion+"\n")
+}
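
For illustration only (not part of this commit), the same extraction can be written as a standalone Go program; it assumes the working directory is the root of the source tree, mirroring the grep/awk pipeline exercised above:

    package main

    import (
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        // Read lib/install/deps.go and pull out the quoted value of the
        // `const goversion = "..."` declaration added in this commit.
        src, err := os.ReadFile("lib/install/deps.go")
        if err != nil {
            panic(err)
        }
        m := regexp.MustCompile(`const goversion = "([^"]+)"`).FindSubmatch(src)
        if m == nil {
            panic("goversion constant not found")
        }
        fmt.Println(string(m[1])) // prints e.g. 1.17.1
    }
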
index b429e800841eb7e4935c63ebb56560ec93f556eb..0fdc13d1985d085c28db23615dd9ce1c673781cd 100644 (file)
@@ -23,6 +23,7 @@ type APIEndpoint struct {
 
 var (
        EndpointConfigGet                     = APIEndpoint{"GET", "arvados/v1/config", ""}
+       EndpointVocabularyGet                 = APIEndpoint{"GET", "arvados/v1/vocabulary", ""}
        EndpointLogin                         = APIEndpoint{"GET", "login", ""}
        EndpointLogout                        = APIEndpoint{"GET", "logout", ""}
        EndpointCollectionCreate              = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
@@ -62,6 +63,11 @@ var (
        EndpointGroupDelete                   = APIEndpoint{"DELETE", "arvados/v1/groups/{uuid}", ""}
        EndpointGroupTrash                    = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/trash", ""}
        EndpointGroupUntrash                  = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/untrash", ""}
+       EndpointLinkCreate                    = APIEndpoint{"POST", "arvados/v1/links", "link"}
+       EndpointLinkUpdate                    = APIEndpoint{"PATCH", "arvados/v1/links/{uuid}", "link"}
+       EndpointLinkGet                       = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
+       EndpointLinkList                      = APIEndpoint{"GET", "arvados/v1/links", ""}
+       EndpointLinkDelete                    = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
        EndpointUserActivate                  = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
        EndpointUserCreate                    = APIEndpoint{"POST", "arvados/v1/users", "user"}
        EndpointUserCurrent                   = APIEndpoint{"GET", "arvados/v1/users/current", ""}
@@ -219,6 +225,7 @@ type BlockWriteResponse struct {
 
 type API interface {
        ConfigGet(ctx context.Context) (json.RawMessage, error)
+       VocabularyGet(ctx context.Context) (Vocabulary, error)
        Login(ctx context.Context, options LoginOptions) (LoginResponse, error)
        Logout(ctx context.Context, options LogoutOptions) (LogoutResponse, error)
        CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
@@ -252,6 +259,11 @@ type API interface {
        GroupDelete(ctx context.Context, options DeleteOptions) (Group, error)
        GroupTrash(ctx context.Context, options DeleteOptions) (Group, error)
        GroupUntrash(ctx context.Context, options UntrashOptions) (Group, error)
+       LinkCreate(ctx context.Context, options CreateOptions) (Link, error)
+       LinkUpdate(ctx context.Context, options UpdateOptions) (Link, error)
+       LinkGet(ctx context.Context, options GetOptions) (Link, error)
+       LinkList(ctx context.Context, options ListOptions) (LinkList, error)
+       LinkDelete(ctx context.Context, options DeleteOptions) (Link, error)
        SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
        SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
        SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
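
A minimal sketch (not part of this commit) of calling the new LinkCreate method through the arvados.API interface; the module import path, the use of CreateOptions.Attrs to carry link attributes, and the example UUIDs are assumptions:

    package main

    import (
        "context"
        "fmt"

        "git.arvados.org/arvados.git/sdk/go/arvados"     // assumed module path
        "git.arvados.org/arvados.git/sdk/go/arvadostest" // APIStub, extended later in this commit
    )

    // addReadLink creates a can_read permission link from tailUUID to headUUID
    // using whatever arvados.API implementation the caller supplies.
    func addReadLink(ctx context.Context, api arvados.API, tailUUID, headUUID string) (arvados.Link, error) {
        return api.LinkCreate(ctx, arvados.CreateOptions{
            Attrs: map[string]interface{}{
                "link_class": "permission",
                "name":       "can_read",
                "tail_uuid":  tailUUID,
                "head_uuid":  headUUID,
            },
        })
    }

    func main() {
        // Exercise the helper against the arvadostest stub; real callers would
        // pass a controller or rpc connection instead.
        lnk, err := addReadLink(context.Background(), &arvadostest.APIStub{},
            "zzzzz-tpzed-000000000000000", "zzzzz-j7d0g-000000000000000")
        fmt.Println(lnk.UUID, err)
    }
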
index e736f79fd7f2bcee24e449596b4950bd279881bc..474ce33b0ee4e9b2262bbe1b16f9ed36c0c2af38 100644 (file)
@@ -77,6 +77,12 @@ type UploadDownloadRolePermissions struct {
        Admin UploadDownloadPermission
 }
 
+type ManagedProperties map[string]struct {
+       Value     interface{}
+       Function  string
+       Protected bool
+}
+
 type Cluster struct {
        ClusterID       string `json:"-"`
        ManagementToken string
@@ -102,6 +108,7 @@ type Cluster struct {
                WebsocketClientEventQueue      int
                WebsocketServerEventQueue      int
                KeepServiceRequestTimeout      Duration
+               VocabularyPath                 string
        }
        AuditLogs struct {
                MaxAge             Duration
@@ -109,23 +116,19 @@ type Cluster struct {
                UnloggedAttributes StringSet
        }
        Collections struct {
-               BlobSigning              bool
-               BlobSigningKey           string
-               BlobSigningTTL           Duration
-               BlobTrash                bool
-               BlobTrashLifetime        Duration
-               BlobTrashCheckInterval   Duration
-               BlobTrashConcurrency     int
-               BlobDeleteConcurrency    int
-               BlobReplicateConcurrency int
-               CollectionVersioning     bool
-               DefaultTrashLifetime     Duration
-               DefaultReplication       int
-               ManagedProperties        map[string]struct {
-                       Value     interface{}
-                       Function  string
-                       Protected bool
-               }
+               BlobSigning                  bool
+               BlobSigningKey               string
+               BlobSigningTTL               Duration
+               BlobTrash                    bool
+               BlobTrashLifetime            Duration
+               BlobTrashCheckInterval       Duration
+               BlobTrashConcurrency         int
+               BlobDeleteConcurrency        int
+               BlobReplicateConcurrency     int
+               CollectionVersioning         bool
+               DefaultTrashLifetime         Duration
+               DefaultReplication           int
+               ManagedProperties            ManagedProperties
                PreserveVersionIfIdle        Duration
                TrashSweepInterval           Duration
                TrustAllContent              bool
@@ -220,6 +223,7 @@ type Cluster struct {
                Insecure    bool
        }
        Users struct {
+               ActivatedUsersAreVisibleToOthers      bool
                AnonymousUserToken                    string
                AdminNotifierEmailFrom                string
                AutoAdminFirstUser                    bool
@@ -273,7 +277,6 @@ type Cluster struct {
                        Options              map[string]struct{}
                }
                UserProfileFormMessage string
-               VocabularyURL          string
                WelcomePageHTML        string
                InactivePageHTML       string
                SSHHelpPageHTML        string
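
The vocabulary definition thus moves from a Workbench-fetched URL to a file read by the API server. A hypothetical cluster configuration fragment (cluster ID and file path are placeholders) might look like:

    Clusters:
      zzzzz:
        API:
          VocabularyPath: /var/lib/arvados/vocabulary.json

Clients can then fetch the parsed definition through the new GET arvados/v1/vocabulary endpoint added above.
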
index 384bebb5997ee86b1b1be2396498f1554ee32ecc..7c68bdb20222f59067b5c5f1d89bad8ea6fef5fe 100644 (file)
@@ -36,6 +36,7 @@ type Container struct {
        RuntimeUserUUID           string                 `json:"runtime_user_uuid"`
        RuntimeAuthScopes         []string               `json:"runtime_auth_scopes"`
        RuntimeToken              string                 `json:"runtime_token"`
+       AuthUUID                  string                 `json:"auth_uuid"`
 }
 
 // ContainerRequest is an arvados#container_request resource.
index f7d1f35a3c322953c702437ca5caecd40687bddd..7df6b84d60eb338fd833944940a2f966192960c2 100644 (file)
@@ -4,17 +4,25 @@
 
 package arvados
 
+import "time"
+
 // Link is an arvados#link record
 type Link struct {
-       UUID       string                 `json:"uuid,omiempty"`
-       OwnerUUID  string                 `json:"owner_uuid"`
-       Name       string                 `json:"name"`
-       LinkClass  string                 `json:"link_class"`
-       HeadUUID   string                 `json:"head_uuid"`
-       HeadKind   string                 `json:"head_kind"`
-       TailUUID   string                 `json:"tail_uuid"`
-       TailKind   string                 `json:"tail_kind"`
-       Properties map[string]interface{} `json:"properties"`
+       UUID                 string                 `json:"uuid,omitempty"`
+       Etag                 string                 `json:"etag"`
+       Href                 string                 `json:"href"`
+       OwnerUUID            string                 `json:"owner_uuid"`
+       Name                 string                 `json:"name"`
+       LinkClass            string                 `json:"link_class"`
+       CreatedAt            time.Time              `json:"created_at"`
+       ModifiedAt           time.Time              `json:"modified_at"`
+       ModifiedByClientUUID string                 `json:"modified_by_client_uuid"`
+       ModifiedByUserUUID   string                 `json:"modified_by_user_uuid"`
+       HeadUUID             string                 `json:"head_uuid"`
+       HeadKind             string                 `json:"head_kind"`
+       TailUUID             string                 `json:"tail_uuid"`
+       TailKind             string                 `json:"tail_kind"`
+       Properties           map[string]interface{} `json:"properties"`
 }
 
 // LinkList is an arvados#linkList resource.
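
A small sketch (not part of this commit) decoding an arvados#link API response into the updated struct; the module import path and the sample record (UUID, timestamp) are made up:

    package main

    import (
        "encoding/json"
        "fmt"

        "git.arvados.org/arvados.git/sdk/go/arvados" // assumed module path
    )

    func main() {
        // A trimmed arvados#link response body.
        body := []byte(`{
            "uuid": "zzzzz-o0j2j-0123456789abcde",
            "link_class": "permission",
            "name": "can_read",
            "created_at": "2021-11-16T20:28:06Z"
        }`)
        var lnk arvados.Link
        if err := json.Unmarshal(body, &lnk); err != nil {
            panic(err)
        }
        // created_at now unmarshals into a time.Time field instead of being dropped.
        fmt.Println(lnk.LinkClass, lnk.Name, lnk.CreatedAt.UTC())
    }
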
diff --git a/sdk/go/arvados/vocabulary.go b/sdk/go/arvados/vocabulary.go
new file mode 100644 (file)
index 0000000..150091b
--- /dev/null
@@ -0,0 +1,220 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "reflect"
+       "strings"
+)
+
+type Vocabulary struct {
+       reservedTagKeys map[string]bool          `json:"-"`
+       StrictTags      bool                     `json:"strict_tags"`
+       Tags            map[string]VocabularyTag `json:"tags"`
+}
+
+type VocabularyTag struct {
+       Strict bool                          `json:"strict"`
+       Labels []VocabularyLabel             `json:"labels"`
+       Values map[string]VocabularyTagValue `json:"values"`
+}
+
+// Cannot have a constant map in Go, so we have to use a function
+func (v *Vocabulary) systemTagKeys() map[string]bool {
+       return map[string]bool{
+               "type":                  true,
+               "template_uuid":         true,
+               "groups":                true,
+               "username":              true,
+               "image_timestamp":       true,
+               "docker-image-repo-tag": true,
+               "filters":               true,
+               "container_request":     true,
+       }
+}
+
+type VocabularyLabel struct {
+       Label string `json:"label"`
+}
+
+type VocabularyTagValue struct {
+       Labels []VocabularyLabel `json:"labels"`
+}
+
+// NewVocabulary creates a new Vocabulary from a JSON definition and a list
+// of reserved tag keys that will get special treatment when strict mode is
+// enabled.
+func NewVocabulary(data []byte, managedTagKeys []string) (voc *Vocabulary, err error) {
+       if r := bytes.Compare(data, []byte("")); r == 0 {
+               return &Vocabulary{}, nil
+       }
+       err = json.Unmarshal(data, &voc)
+       if err != nil {
+               return nil, fmt.Errorf("invalid JSON format error: %q", err)
+       }
+       if reflect.DeepEqual(voc, &Vocabulary{}) {
+               return nil, fmt.Errorf("JSON data provided doesn't match Vocabulary format: %q", data)
+       }
+       voc.reservedTagKeys = make(map[string]bool)
+       for _, managedKey := range managedTagKeys {
+               voc.reservedTagKeys[managedKey] = true
+       }
+       for systemKey := range voc.systemTagKeys() {
+               voc.reservedTagKeys[systemKey] = true
+       }
+       err = voc.validate()
+       if err != nil {
+               return nil, err
+       }
+       return voc, nil
+}
+
+func (v *Vocabulary) validate() error {
+       if v == nil {
+               return nil
+       }
+       tagKeys := map[string]string{}
+       // Checks for Vocabulary strictness
+       if v.StrictTags && len(v.Tags) == 0 {
+               return fmt.Errorf("vocabulary is strict but no tags are defined")
+       }
+       // Checks for collisions between tag keys, reserved tag keys
+       // and tag key labels.
+       for key := range v.Tags {
+               if v.reservedTagKeys[key] {
+                       return fmt.Errorf("tag key %q is reserved", key)
+               }
+               lcKey := strings.ToLower(key)
+               if tagKeys[lcKey] != "" {
+                       return fmt.Errorf("duplicate tag key %q", key)
+               }
+               tagKeys[lcKey] = key
+               for _, lbl := range v.Tags[key].Labels {
+                       label := strings.ToLower(lbl.Label)
+                       if tagKeys[label] != "" {
+                               return fmt.Errorf("tag label %q for key %q already seen as a tag key or label", lbl.Label, key)
+                       }
+                       tagKeys[label] = lbl.Label
+               }
+               // Checks for value strictness
+               if v.Tags[key].Strict && len(v.Tags[key].Values) == 0 {
+                       return fmt.Errorf("tag key %q is configured as strict but doesn't provide values", key)
+               }
+               // Checks for collisions between tag values and tag value labels.
+               tagValues := map[string]string{}
+               for val := range v.Tags[key].Values {
+                       lcVal := strings.ToLower(val)
+                       if tagValues[lcVal] != "" {
+                               return fmt.Errorf("duplicate tag value %q for tag %q", val, key)
+                       }
+                       // Checks for collisions between labels from different values.
+                       tagValues[lcVal] = val
+                       for _, tagLbl := range v.Tags[key].Values[val].Labels {
+                               label := strings.ToLower(tagLbl.Label)
+                               if tagValues[label] != "" && tagValues[label] != val {
+                                       return fmt.Errorf("tag value label %q for pair (%q:%q) already seen on value %q", tagLbl.Label, key, val, tagValues[label])
+                               }
+                               tagValues[label] = val
+                       }
+               }
+       }
+       return nil
+}
+
+func (v *Vocabulary) getLabelsToKeys() (labels map[string]string) {
+       if v == nil {
+               return
+       }
+       labels = make(map[string]string)
+       for key, val := range v.Tags {
+               for _, lbl := range val.Labels {
+                       label := strings.ToLower(lbl.Label)
+                       labels[label] = key
+               }
+       }
+       return labels
+}
+
+func (v *Vocabulary) getLabelsToValues(key string) (labels map[string]string) {
+       if v == nil {
+               return
+       }
+       labels = make(map[string]string)
+       if _, ok := v.Tags[key]; ok {
+               for val := range v.Tags[key].Values {
+                       labels[strings.ToLower(val)] = val
+                       for _, tagLbl := range v.Tags[key].Values[val].Labels {
+                               label := strings.ToLower(tagLbl.Label)
+                               labels[label] = val
+                       }
+               }
+       }
+       return labels
+}
+
+func (v *Vocabulary) checkValue(key, val string) error {
+       if _, ok := v.Tags[key].Values[val]; !ok {
+               lcVal := strings.ToLower(val)
+               correctValue, ok := v.getLabelsToValues(key)[lcVal]
+               if ok {
+                       return fmt.Errorf("tag value %q for key %q is an alias, must be provided as %q", val, key, correctValue)
+               } else if v.Tags[key].Strict {
+                       return fmt.Errorf("tag value %q is not valid for key %q", val, key)
+               }
+       }
+       return nil
+}
+
+// Check validates the given data against the vocabulary.
+func (v *Vocabulary) Check(data map[string]interface{}) error {
+       if v == nil {
+               return nil
+       }
+       for key, val := range data {
+               // Checks for key validity
+               if v.reservedTagKeys[key] {
+                       // Allow reserved keys to be used even if they are not defined in
+                       // the vocabulary, regardless of its strictness.
+                       continue
+               }
+               if _, ok := v.Tags[key]; !ok {
+                       lcKey := strings.ToLower(key)
+                       correctKey, ok := v.getLabelsToKeys()[lcKey]
+                       if ok {
+                               return fmt.Errorf("tag key %q is an alias, must be provided as %q", key, correctKey)
+                       } else if v.StrictTags {
+                               return fmt.Errorf("tag key %q is not defined in the vocabulary", key)
+                       }
+                       // If the key is not defined, we don't need to check the value
+                       continue
+               }
+               // Checks for value validity -- key is defined
+               switch val := val.(type) {
+               case string:
+                       err := v.checkValue(key, val)
+                       if err != nil {
+                               return err
+                       }
+               case []interface{}:
+                       for _, singleVal := range val {
+                               switch singleVal := singleVal.(type) {
+                               case string:
+                                       err := v.checkValue(key, singleVal)
+                                       if err != nil {
+                                               return err
+                                       }
+                               default:
+                                       return fmt.Errorf("value list element type for tag key %q was %T, but expected a string", key, singleVal)
+                               }
+                       }
+               default:
+                       return fmt.Errorf("value type for tag key %q was %T, but expected a string or list of strings", key, val)
+               }
+       }
+       return nil
+}
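
A minimal usage sketch (not part of this commit) for the vocabulary code above; the module import path is assumed and the tag/value IDs are made up:

    package main

    import (
        "fmt"

        "git.arvados.org/arvados.git/sdk/go/arvados" // assumed module path
    )

    func main() {
        def := []byte(`{
            "strict_tags": false,
            "tags": {
                "IDTAGCOLORS": {
                    "strict": true,
                    "labels": [{"label": "Color"}],
                    "values": {
                        "IDVALRED":  {"labels": [{"label": "Red"}]},
                        "IDVALBLUE": {"labels": [{"label": "Blue"}]}
                    }
                }
            }
        }`)
        voc, err := arvados.NewVocabulary(def, nil)
        if err != nil {
            panic(err)
        }
        // nil: IDVALRED is a defined value for IDTAGCOLORS.
        fmt.Println(voc.Check(map[string]interface{}{"IDTAGCOLORS": "IDVALRED"}))
        // error: "Red" is only a label (alias), so Check asks for IDVALRED instead.
        fmt.Println(voc.Check(map[string]interface{}{"IDTAGCOLORS": "Red"}))
    }

Check returns nil for the defined value and an "is an alias" error for its label, the same behavior exercised by the test suite that follows.
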
diff --git a/sdk/go/arvados/vocabulary_test.go b/sdk/go/arvados/vocabulary_test.go
new file mode 100644 (file)
index 0000000..5a5189d
--- /dev/null
@@ -0,0 +1,457 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+
+       check "gopkg.in/check.v1"
+)
+
+type VocabularySuite struct {
+       testVoc *Vocabulary
+}
+
+var _ = check.Suite(&VocabularySuite{})
+
+func (s *VocabularySuite) SetUpTest(c *check.C) {
+       s.testVoc = &Vocabulary{
+               reservedTagKeys: map[string]bool{
+                       "reservedKey": true,
+               },
+               StrictTags: false,
+               Tags: map[string]VocabularyTag{
+                       "IDTAGANIMALS": {
+                               Strict: false,
+                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                               Values: map[string]VocabularyTagValue{
+                                       "IDVALANIMAL1": {
+                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+                                       },
+                                       "IDVALANIMAL2": {
+                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+                                       },
+                               },
+                       },
+                       "IDTAGIMPORTANCE": {
+                               Strict: true,
+                               Labels: []VocabularyLabel{{Label: "Importance"}, {Label: "Priority"}},
+                               Values: map[string]VocabularyTagValue{
+                                       "IDVAL3": {
+                                               Labels: []VocabularyLabel{{Label: "Low"}, {Label: "Low priority"}},
+                                       },
+                                       "IDVAL2": {
+                                               Labels: []VocabularyLabel{{Label: "Medium"}, {Label: "Medium priority"}},
+                                       },
+                                       "IDVAL1": {
+                                               Labels: []VocabularyLabel{{Label: "High"}, {Label: "High priority"}},
+                                       },
+                               },
+                       },
+                       "IDTAGCOMMENT": {
+                               Strict: false,
+                               Labels: []VocabularyLabel{{Label: "Comment"}},
+                       },
+               },
+       }
+       err := s.testVoc.validate()
+       c.Assert(err, check.IsNil)
+}
+
+func (s *VocabularySuite) TestCheck(c *check.C) {
+       tests := []struct {
+               name          string
+               strictVoc     bool
+               props         string
+               expectSuccess bool
+               errMatches    string
+       }{
+               // Check succeeds
+               {
+                       "Known key, known value",
+                       false,
+                       `{"IDTAGANIMALS":"IDVALANIMAL1"}`,
+                       true,
+                       "",
+               },
+               {
+                       "Unknown non-alias key on non-strict vocabulary",
+                       false,
+                       `{"foo":"bar"}`,
+                       true,
+                       "",
+               },
+               {
+                       "Known non-strict key, unknown non-alias value",
+                       false,
+                       `{"IDTAGANIMALS":"IDVALANIMAL3"}`,
+                       true,
+                       "",
+               },
+               {
+                       "Undefined but reserved key on strict vocabulary",
+                       true,
+                       `{"reservedKey":"bar"}`,
+                       true,
+                       "",
+               },
+               {
+                       "Known key, list of known values",
+                       false,
+                       `{"IDTAGANIMALS":["IDVALANIMAL1","IDVALANIMAL2"]}`,
+                       true,
+                       "",
+               },
+               {
+                       "Known non-strict key, list of unknown non-alias values",
+                       false,
+                       `{"IDTAGCOMMENT":["hello world","lorem ipsum"]}`,
+                       true,
+                       "",
+               },
+               // Check fails
+               {
+                       "Known first key & value; known 2nd key, unknown 2nd value",
+                       false,
+                       `{"IDTAGANIMALS":"IDVALANIMAL1", "IDTAGIMPORTANCE": "blah blah"}`,
+                       false,
+                       "tag value.*is not valid for key.*",
+               },
+               {
+                       "Unknown non-alias key on strict vocabulary",
+                       true,
+                       `{"foo":"bar"}`,
+                       false,
+                       "tag key.*is not defined in the vocabulary",
+               },
+               {
+                       "Known non-strict key, known value alias",
+                       false,
+                       `{"IDTAGANIMALS":"Loxodonta"}`,
+                       false,
+                       "tag value.*for key.* is an alias, must be provided as.*",
+               },
+               {
+                       "Known strict key, unknown non-alias value",
+                       false,
+                       `{"IDTAGIMPORTANCE":"Unimportant"}`,
+                       false,
+                       "tag value.*is not valid for key.*",
+               },
+               {
+                       "Known strict key, lowercase value regarded as alias",
+                       false,
+                       `{"IDTAGIMPORTANCE":"idval1"}`,
+                       false,
+                       "tag value.*for key.* is an alias, must be provided as.*",
+               },
+               {
+                       "Known strict key, known value alias",
+                       false,
+                       `{"IDTAGIMPORTANCE":"High"}`,
+                       false,
+                       "tag value.* for key.*is an alias, must be provided as.*",
+               },
+               {
+                       "Known strict key, list of known alias values",
+                       false,
+                       `{"IDTAGIMPORTANCE":["High", "Low"]}`,
+                       false,
+                       "tag value.*for key.*is an alias, must be provided as.*",
+               },
+               {
+                       "Known strict key, list of unknown non-alias values",
+                       false,
+                       `{"IDTAGIMPORTANCE":["foo","bar"]}`,
+                       false,
+                       "tag value.*is not valid for key.*",
+               },
+               {
+                       "Invalid value type",
+                       false,
+                       `{"IDTAGANIMALS":1}`,
+                       false,
+                       "value type for tag key.* was.*, but expected a string or list of strings",
+               },
+               {
+                       "Value list of invalid type",
+                       false,
+                       `{"IDTAGANIMALS":[1]}`,
+                       false,
+                       "value list element type for tag key.* was.*, but expected a string",
+               },
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               s.testVoc.StrictTags = tt.strictVoc
+
+               var data map[string]interface{}
+               err := json.Unmarshal([]byte(tt.props), &data)
+               c.Assert(err, check.IsNil)
+               err = s.testVoc.Check(data)
+               if tt.expectSuccess {
+                       c.Assert(err, check.IsNil)
+               } else {
+                       c.Assert(err, check.NotNil)
+                       c.Assert(err.Error(), check.Matches, tt.errMatches)
+               }
+       }
+}
+
+func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
+       tests := []struct {
+               name       string
+               data       string
+               isValid    bool
+               errMatches string
+               expect     *Vocabulary
+       }{
+               {"Empty data", "", true, "", &Vocabulary{}},
+               {"Invalid JSON", "foo", false, "invalid JSON format.*", nil},
+               {"Valid, empty JSON", "{}", false, ".*doesn't match Vocabulary format.*", nil},
+               {"Valid JSON, wrong data", `{"foo":"bar"}`, false, ".*doesn't match Vocabulary format.*", nil},
+               {
+                       "Simple valid example",
+                       `{"tags":{
+                               "IDTAGANIMALS":{
+                                       "strict": false,
+                                       "labels": [{"label": "Animal"}, {"label": "Creature"}],
+                                       "values": {
+                                               "IDVALANIMAL1":{"labels":[{"label":"Human"}, {"label":"Homo sapiens"}]},
+                                               "IDVALANIMAL2":{"labels":[{"label":"Elephant"}, {"label":"Loxodonta"}]},
+                                               "DOG":{"labels":[{"label":"Dog"}, {"label":"Canis lupus familiaris"}, {"label":"dOg"}]}
+                                       }
+                               }
+                       }}`,
+                       true, "",
+                       &Vocabulary{
+                               reservedTagKeys: map[string]bool{
+                                       "type":                  true,
+                                       "template_uuid":         true,
+                                       "groups":                true,
+                                       "username":              true,
+                                       "image_timestamp":       true,
+                                       "docker-image-repo-tag": true,
+                                       "filters":               true,
+                                       "container_request":     true,
+                               },
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                               Values: map[string]VocabularyTagValue{
+                                                       "IDVALANIMAL1": {
+                                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+                                                       },
+                                                       "IDVALANIMAL2": {
+                                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+                                                       },
+                                                       "DOG": {
+                                                               Labels: []VocabularyLabel{{Label: "Dog"}, {Label: "Canis lupus familiaris"}, {Label: "dOg"}},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+               },
+               {
+                       "Valid data, but uses reserved key",
+                       `{"tags":{
+                               "type":{
+                                       "strict": false,
+                                       "labels": [{"label": "Type"}]
+                               }
+                       }}`,
+                       false, "tag key.*is reserved", nil,
+               },
+       }
+
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               voc, err := NewVocabulary([]byte(tt.data), []string{})
+               if tt.isValid {
+                       c.Assert(err, check.IsNil)
+               } else {
+                       c.Assert(err, check.NotNil)
+                       if tt.errMatches != "" {
+                               c.Assert(err, check.ErrorMatches, tt.errMatches)
+                       }
+               }
+               c.Assert(voc, check.DeepEquals, tt.expect)
+       }
+}
+
+func (s *VocabularySuite) TestValidationErrors(c *check.C) {
+       tests := []struct {
+               name       string
+               voc        *Vocabulary
+               errMatches string
+       }{
+               {
+                       "Strict vocabulary, no keys",
+                       &Vocabulary{
+                               StrictTags: true,
+                       },
+                       "vocabulary is strict but no tags are defined",
+               },
+               {
+                       "Collision between tag key and tag key label",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                       },
+                                       "IDTAGCOMMENT": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IDTAGANIMALS"}},
+                                       },
+                               },
+                       },
+                       "", // Depending on how the map is sorted, this could be one of two errors
+               },
+               {
+                       "Collision between tag key and tag key label (case-insensitive)",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                       },
+                                       "IDTAGCOMMENT": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IdTagAnimals"}},
+                                       },
+                               },
+                       },
+                       "", // Depending on how the map is sorted, this could be one of two errors
+               },
+               {
+                       "Collision between tag key labels",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                       },
+                                       "IDTAGCOMMENT": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "Animal"}},
+                                       },
+                               },
+                       },
+                       "tag label.*for key.*already seen.*",
+               },
+               {
+                       "Collision between tag value and tag value label",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                               Values: map[string]VocabularyTagValue{
+                                                       "IDVALANIMAL1": {
+                                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+                                                       },
+                                                       "IDVALANIMAL2": {
+                                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDVALANIMAL1"}},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+                       "", // Depending on how the map is sorted, this could be one of two errors
+               },
+               {
+                       "Collision between tag value and tag value label (case-insensitive)",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                               Values: map[string]VocabularyTagValue{
+                                                       "IDVALANIMAL1": {
+                                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+                                                       },
+                                                       "IDVALANIMAL2": {
+                                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDValAnimal1"}},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+                       "", // Depending on how the map is sorted, this could be one of two errors
+               },
+               {
+                       "Collision between tag value labels",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                               Values: map[string]VocabularyTagValue{
+                                                       "IDVALANIMAL1": {
+                                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+                                                       },
+                                                       "IDVALANIMAL2": {
+                                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Mammal"}},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+                       "tag value label.*for pair.*already seen.*on value.*",
+               },
+               {
+                       "Collision between tag value labels (case-insensitive)",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: false,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                               Values: map[string]VocabularyTagValue{
+                                                       "IDVALANIMAL1": {
+                                                               Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+                                                       },
+                                                       "IDVALANIMAL2": {
+                                                               Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "mAMMAL"}},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+                       "tag value label.*for pair.*already seen.*on value.*",
+               },
+               {
+                       "Strict tag key, with no values",
+                       &Vocabulary{
+                               StrictTags: false,
+                               Tags: map[string]VocabularyTag{
+                                       "IDTAGANIMALS": {
+                                               Strict: true,
+                                               Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+                                       },
+                               },
+                       },
+                       "tag key.*is configured as strict but doesn't provide values",
+               },
+       }
+       for _, tt := range tests {
+               c.Log(c.TestName()+" ", tt.name)
+               err := tt.voc.validate()
+               c.Assert(err, check.NotNil)
+               if tt.errMatches != "" {
+                       c.Assert(err, check.ErrorMatches, tt.errMatches)
+               }
+       }
+}
index 8bf01693c444100cb1b866b796e03c5c7699f5ed..0af477125b737a65f1fad46fce3009f5e27d1bcd 100644 (file)
@@ -33,6 +33,10 @@ func (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {
        as.appendCall(ctx, as.ConfigGet, nil)
        return nil, as.Error
 }
+func (as *APIStub) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+       as.appendCall(ctx, as.VocabularyGet, nil)
+       return arvados.Vocabulary{}, as.Error
+}
 func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
        as.appendCall(ctx, as.Login, options)
        return arvados.LoginResponse{}, as.Error
@@ -165,6 +169,26 @@ func (as *APIStub) GroupUntrash(ctx context.Context, options arvados.UntrashOpti
        as.appendCall(ctx, as.GroupUntrash, options)
        return arvados.Group{}, as.Error
 }
+func (as *APIStub) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+       as.appendCall(ctx, as.LinkCreate, options)
+       return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+       as.appendCall(ctx, as.LinkUpdate, options)
+       return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+       as.appendCall(ctx, as.LinkGet, options)
+       return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+       as.appendCall(ctx, as.LinkList, options)
+       return arvados.LinkList{}, as.Error
+}
+func (as *APIStub) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+       as.appendCall(ctx, as.LinkDelete, options)
+       return arvados.Link{}, as.Error
+}
 func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
        as.appendCall(ctx, as.SpecimenCreate, options)
        return arvados.Specimen{}, as.Error
index d03265ca44b1b6e886e57d455dadcb7a613d0b6e..55be40fa04fc0f7a46a5de5a239464547f81df4b 100644 (file)
@@ -1546,7 +1546,8 @@ class Collection(RichCollectionBase):
              storage_classes=None,
              trash_at=None,
              merge=True,
-             num_retries=None):
+             num_retries=None,
+             preserve_version=False):
         """Save collection to an existing collection record.
 
         Commit pending buffer blocks to Keep, merge with remote record (if
@@ -1576,6 +1577,13 @@ class Collection(RichCollectionBase):
         :num_retries:
           Retry count on API calls (if None,  use the collection default)
 
+        :preserve_version:
+          If True, indicate that the collection content being saved right now
+          should be preserved in a version snapshot if the collection record is
+          updated in the future. Requires that the API server has
+          Collections.CollectionVersioning enabled; if it is not, setting
+          this will raise an exception.
+
         """
         if properties and type(properties) is not dict:
             raise errors.ArgumentError("properties must be dictionary type.")
@@ -1588,6 +1596,9 @@ class Collection(RichCollectionBase):
         if trash_at and type(trash_at) is not datetime.datetime:
             raise errors.ArgumentError("trash_at must be datetime type.")
 
+        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+            raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
         body={}
         if properties:
             body["properties"] = properties
@@ -1596,6 +1607,8 @@ class Collection(RichCollectionBase):
         if trash_at:
             t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
             body["trash_at"] = t
+        if preserve_version:
+            body["preserve_version"] = preserve_version
 
         if not self.committed():
             if self._has_remote_blocks:
@@ -1641,7 +1654,8 @@ class Collection(RichCollectionBase):
                  storage_classes=None,
                  trash_at=None,
                  ensure_unique_name=False,
-                 num_retries=None):
+                 num_retries=None,
+                 preserve_version=False):
         """Save collection to a new collection record.
 
         Commit pending buffer blocks to Keep and, when create_collection_record
@@ -1680,6 +1694,13 @@ class Collection(RichCollectionBase):
         :num_retries:
           Retry count on API calls (if None,  use the collection default)
 
+        :preserve_version:
+          If True, indicate that the collection content being saved right now
+          should be preserved in a version snapshot if the collection record is
+          updated in the future. Requires that the API server has
+          Collections.CollectionVersioning enabled; if it is not, setting
+          this will raise an exception.
+
         """
         if properties and type(properties) is not dict:
             raise errors.ArgumentError("properties must be dictionary type.")
@@ -1690,6 +1711,9 @@ class Collection(RichCollectionBase):
         if trash_at and type(trash_at) is not datetime.datetime:
             raise errors.ArgumentError("trash_at must be datetime type.")
 
+        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+            raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
         if self._has_remote_blocks:
             # Copy any remote blocks to the local cluster.
             self._copy_remote_blocks(remote_blocks={})
@@ -1718,6 +1742,8 @@ class Collection(RichCollectionBase):
             if trash_at:
                 t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
                 body["trash_at"] = t
+            if preserve_version:
+                body["preserve_version"] = preserve_version
 
             self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
             text = self._api_response["manifest_text"]
index 6d2643a967ef70374f2ff222c19bb67917ba5a0e..f9178325091f94ebb97e5f46fba02903adf99104 100644 (file)
@@ -791,6 +791,7 @@ def setup_config():
                     "UserProfileNotificationAddress": "arvados@example.com",
                 },
                 "Collections": {
+                    "CollectionVersioning": True,
                     "BlobSigningKey": "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc",
                     "TrustAllContent": False,
                     "ForwardSlashNameSubstitution": "/",
index f821ff952f7a45f913538c890ffc40d397b04ada..a43e0d40dfe7ed48f5477689d3623afefe952ba3 100644 (file)
@@ -1360,6 +1360,25 @@ class NewCollectionTestCaseWithServersAndTokens(run_test_server.TestCaseWithServ
 
 
 class NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):
+    def test_preserve_version_on_save(self):
+        c = Collection()
+        c.save_new(preserve_version=True)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 1)
+        self.assertEqual(coll_record['preserve_version'], True)
+        with c.open("foo.txt", "wb") as foo:
+            foo.write(b"foo")
+        c.save(preserve_version=True)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 2)
+        self.assertEqual(coll_record['preserve_version'], True)
+        with c.open("bar.txt", "wb") as foo:
+            foo.write(b"bar")
+        c.save(preserve_version=False)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 3)
+        self.assertEqual(coll_record['preserve_version'], False)
+
     def test_get_manifest_text_only_committed(self):
         c = Collection()
         with c.open("count.txt", "wb") as f:
index 6e149d45af0d102159a0baa899d54335b389ce25..bdf791153446a7f56013edeaed00a04cb524646a 100644 (file)
@@ -142,7 +142,7 @@ GEM
     metaclass (0.0.4)
     method_source (1.0.0)
     mini_mime (1.1.0)
-    mini_portile2 (2.5.3)
+    mini_portile2 (2.6.1)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -156,8 +156,8 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.7)
-      mini_portile2 (~> 2.5.0)
+    nokogiri (1.12.5)
+      mini_portile2 (~> 2.6.1)
       racc (~> 1.4)
     oj (3.9.2)
     optimist (3.0.0)
@@ -168,7 +168,7 @@ GEM
     pg (1.1.4)
     power_assert (1.1.4)
     public_suffix (4.0.6)
-    racc (1.5.2)
+    racc (1.6.0)
     rack (2.2.3)
     rack-test (1.1.0)
       rack (>= 1.0, < 3)
index 366c03e309ca6f54c0ef5bd2ad9f3aa331c592ea..febb8ea51611eb5a21549b8133770fe059a2ca5c 100644 (file)
@@ -234,8 +234,9 @@ SELECT target_uuid, perm_level
                               name: 'can_read').empty?
 
     # Add can_read link from this user to "all users" which makes this
-    # user "invited"
-    group_perm = create_user_group_link
+    # user "invited", and (depending on config) a link in the opposite
+    # direction which makes this user visible to other users.
+    group_perms = add_to_all_users_group
 
     # Add git repo
     repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
@@ -267,7 +268,7 @@ SELECT target_uuid, perm_level
 
     forget_cached_group_perms
 
-    return [repo_perm, vm_login_perm, group_perm, self].compact
+    return [repo_perm, vm_login_perm, *group_perms, self].compact
   end
 
   # delete user signatures, login, repo, and vm perms, and mark as inactive
@@ -728,16 +729,26 @@ SELECT target_uuid, perm_level
     login_perm
   end
 
-  # add the user to the 'All users' group
-  def create_user_group_link
-    return (Link.where(tail_uuid: self.uuid,
+  def add_to_all_users_group
+    resp = [Link.where(tail_uuid: self.uuid,
                        head_uuid: all_users_group_uuid,
                        link_class: 'permission',
-                       name: 'can_read').first or
+                       name: 'can_read').first ||
             Link.create(tail_uuid: self.uuid,
                         head_uuid: all_users_group_uuid,
                         link_class: 'permission',
-                        name: 'can_read'))
+                        name: 'can_read')]
+    if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
+      resp += [Link.where(tail_uuid: all_users_group_uuid,
+                          head_uuid: self.uuid,
+                          link_class: 'permission',
+                          name: 'can_read').first ||
+               Link.create(tail_uuid: all_users_group_uuid,
+                           head_uuid: self.uuid,
+                           link_class: 'permission',
+                           name: 'can_read')]
+    end
+    return resp
   end
 
   # Give the special "System group" permission to manage this user and
index c807a7d6cb5ec6f3f4d92ceb18fb519629a3d1fb..ae7b21dec83e556f8321ee7290e5680824be5881 100644 (file)
@@ -13,6 +13,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     @initial_link_count = Link.count
     @vm_uuid = virtual_machines(:testvm).uuid
     ActionMailer::Base.deliveries = []
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
   end
 
   test "activate a user after signing UA" do
index 123031b35feb90b0fc874b0461fff896ca531702..128d0ebaa6f6f255ce54add5e8aac9b39ecca208 100644 (file)
@@ -218,6 +218,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "manager user gets permission to minions' articles via can_manage link" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     manager = create :active_user, first_name: "Manage", last_name: "Er"
     minion = create :active_user, first_name: "Min", last_name: "Ion"
     minions_specimen = act_as_user minion do
@@ -314,6 +315,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     a = create :active_user, first_name: "A"
     b = create :active_user, first_name: "B"
     other = create :active_user, first_name: "OTHER"
index c00164c0a36235254248ece8fef06fed3f59e4be..7368d893745658fd20de42de6d2410f421a1098b 100644 (file)
@@ -447,30 +447,40 @@ class UserTest < ActiveSupport::TestCase
     assert_not_allowed { User.new.save }
   end
 
-  test "setup new user" do
-    set_user_from_auth :admin
+  [true, false].each do |visible|
+    test "setup new user with ActivatedUsersAreVisibleToOthers=#{visible}" do
+      Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = visible
+      set_user_from_auth :admin
 
-    email = 'foo@example.com'
+      email = 'foo@example.com'
 
-    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+      user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
 
-    vm = VirtualMachine.create
+      vm = VirtualMachine.create
 
-    response = user.setup(repo_name: 'foo/testrepo',
-                          vm_uuid: vm.uuid)
+      response = user.setup(repo_name: 'foo/testrepo',
+                            vm_uuid: vm.uuid)
 
-    resp_user = find_obj_in_resp response, 'User'
-    verify_user resp_user, email
+      resp_user = find_obj_in_resp response, 'User'
+      verify_user resp_user, email
 
-    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+      group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+      verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
-    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
-    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+      group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+      if visible
+        verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+      else
+        assert_nil group_perm2
+      end
 
-    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
-    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
-    assert_equal("foo", vm_perm.properties["username"])
+      repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+      verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+      vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+      verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+      assert_equal("foo", vm_perm.properties["username"])
+    end
   end
 
   test "setup new user with junk in database" do
@@ -514,6 +524,9 @@ class UserTest < ActiveSupport::TestCase
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
     verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
+    group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+    verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+
     # invoke setup again with repo_name
     response = user.setup(repo_name: 'foo/testrepo')
     resp_user = find_obj_in_resp response, 'User', nil
@@ -560,7 +573,7 @@ class UserTest < ActiveSupport::TestCase
           break
         end
       else  # looking for a link
-        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).andand.kind == head_kind
           return_obj = x
           break
         end
index 67a2aaa4da881891be106535d38e9bc4969220ab..5f0a1f80f6a4e9f693c91b8946ce41cac6c2f227 100644 (file)
@@ -244,7 +244,7 @@ class Mount(object):
         usr = self.api.users().current().execute(num_retries=self.args.retries)
         now = time.time()
         dir_class = None
-        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries]
+        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
         mount_readme = False
 
         storage_classes = None
@@ -310,7 +310,7 @@ class Mount(object):
             return
 
         e = self.operations.inodes.add_entry(Directory(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api.config))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
         dir_args[0] = e.inode
 
         for name in self.args.mount_by_id:
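
For context: the two hunks above thread the mount-wide enable_write flag (taken from the command-line parser) into the root directory constructors, so every directory object knows at creation time whether the mount allows writes. Below is a minimal sketch of the resulting call, assuming the new CollectionDirectory signature introduced later in this patch; the surrounding variable names are illustrative, not taken from command.py.

    # Sketch only: construct a read-only collection root using the new
    # (parent_inode, inodes, api, num_retries, enable_write, ...) signature.
    root = CollectionDirectory(
        llfuse.ROOT_INODE,   # inode of the mount root
        operations.inodes,   # shared inode table
        api_client,          # Arvados API client
        num_retries,         # API retry count
        False,               # enable_write=False -> read-only mount
        collection_record)   # collection record to expose
    operations.inodes.add_entry(root)
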
index d5a018ae88fcd859adc3047ad2384732a0bfbe92..a2e33c7b3bcc47b7d8288dc60168f75ec151b647 100644 (file)
@@ -36,7 +36,7 @@ class Directory(FreshBase):
     and the value referencing a File or Directory object.
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig):
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write):
         """parent_inode is the integer inode number"""
 
         super(Directory, self).__init__()
@@ -49,6 +49,7 @@ class Directory(FreshBase):
         self.apiconfig = apiconfig
         self._entries = {}
         self._mtime = time.time()
+        self._enable_write = enable_write
 
     def forward_slash_subst(self):
         if not hasattr(self, '_fsns'):
@@ -269,8 +270,8 @@ class CollectionDirectoryBase(Directory):
 
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, collection):
-        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig)
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
         self.apiconfig = apiconfig
         self.collection = collection
 
@@ -284,10 +285,10 @@ class CollectionDirectoryBase(Directory):
             item.fuse_entry.dead = False
             self._entries[name] = item.fuse_entry
         elif isinstance(item, arvados.collection.RichCollectionBase):
-            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, item))
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item))
             self._entries[name].populate(mtime)
         else:
-            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
         item.fuse_entry = self._entries[name]
 
     def on_event(self, event, collection, name, item):
@@ -348,28 +349,36 @@ class CollectionDirectoryBase(Directory):
                 self.new_entry(entry, item, self.mtime())
 
     def writable(self):
-        return self.collection.writable()
+        return self._enable_write and self.collection.writable()
 
     @use_counter
     def flush(self):
+        if not self.writable():
+            return
         with llfuse.lock_released:
             self.collection.root_collection().save()
 
     @use_counter
     @check_update
     def create(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.open(name, "w").close()
 
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.mkdirs(name)
 
     @use_counter
     @check_update
     def unlink(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
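
Each mutating method above now opens with the same guard: if writable() is false (which it always is when the mount was created with enable_write=False), fail with EROFS before touching the collection. One equivalent way to express that guard once, shown here purely as a sketch and not part of this patch, is a small decorator:

    import errno
    import functools

    import llfuse

    def require_writable(method):
        # Hypothetical helper (not in this patch): reject the operation with
        # EROFS unless the directory currently reports itself as writable.
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.writable():
                raise llfuse.FUSEError(errno.EROFS)
            return method(self, *args, **kwargs)
        return wrapper
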
@@ -377,6 +386,8 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -384,6 +395,9 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, CollectionDirectoryBase):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -413,8 +427,8 @@ class CollectionDirectoryBase(Directory):
 class CollectionDirectory(CollectionDirectoryBase):
     """Represents the root of a directory tree representing a collection."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
-        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, None)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None)
         self.api = api
         self.num_retries = num_retries
         self.collection_record_file = None
@@ -434,14 +448,14 @@ class CollectionDirectory(CollectionDirectoryBase):
             self._mtime = 0
         self._manifest_size = 0
         if self.collection_locator:
-            self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+            self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
         self._updating_lock = threading.Lock()
 
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
     def writable(self):
-        return self.collection.writable() if self.collection is not None else self._writable
+        return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
 
     def want_event_subscribe(self):
         return (uuid_pattern.match(self.collection_locator) is not None)
@@ -603,14 +617,16 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         def save_new(self):
             pass
 
-    def __init__(self, parent_inode, inodes, api_client, num_retries, storage_classes=None):
+    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
         collection = self.UnsaveableCollection(
             api_client=api_client,
             keep_client=api_client.keep,
             num_retries=num_retries,
             storage_classes_desired=storage_classes)
+        # This is always enable_write=True because it never tries to
+        # save to the backend
         super(TmpCollectionDirectory, self).__init__(
-            parent_inode, inodes, api_client.config, collection)
+            parent_inode, inodes, api_client.config, True, collection)
         self.collection_record_file = None
         self.populate(self.mtime())
 
@@ -703,8 +719,8 @@ and the directory will appear if it exists.
 
 """.lstrip()
 
-    def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False, storage_classes=None):
-        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
+        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.pdh_only = pdh_only
@@ -720,7 +736,8 @@ and the directory will appear if it exists.
             # If we're the root directory, add an identical by_id subdirectory.
             if self.inode == llfuse.ROOT_INODE:
                 self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                    self.pdh_only))
 
     def __contains__(self, k):
         if k in self._entries:
@@ -738,11 +755,11 @@ and the directory will appear if it exists.
                 if project[u'items_available'] == 0:
                     return False
                 e = self.inodes.add_entry(ProjectDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries,
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
                     project[u'items'][0], storage_classes=self.storage_classes))
             else:
                 e = self.inodes.add_entry(CollectionDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, k))
+                        self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
 
             if e.update():
                 if k not in self._entries:
@@ -776,8 +793,8 @@ and the directory will appear if it exists.
 class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
-        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -798,7 +815,8 @@ class TagsDirectory(Directory):
             self.merge(tags['items']+[{"name": n} for n in self._extra],
                        lambda i: i['name'],
                        lambda a, i: a.tag == i['name'],
-                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                              i['name'], poll=self._poll, poll_time=self._poll_time))
 
     @use_counter
     @check_update
@@ -832,9 +850,9 @@ class TagDirectory(Directory):
     to the user that are tagged with a particular tag.
     """
 
-    def __init__(self, parent_inode, inodes, api, num_retries, tag,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
                  poll=False, poll_time=60):
-        super(TagDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.tag = tag
@@ -856,15 +874,15 @@ class TagDirectory(Directory):
         self.merge(taggedcollections['items'],
                    lambda i: i['head_uuid'],
                    lambda a, i: a.collection_locator == i['head_uuid'],
-                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
 
 
 class ProjectDirectory(Directory):
     """A special directory that contains the contents of a project."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
                  poll=True, poll_time=3, storage_classes=None):
-        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.project_object = project_object
@@ -882,12 +900,13 @@ class ProjectDirectory(Directory):
 
     def createDirectory(self, i):
         if collection_uuid_pattern.match(i['uuid']):
-            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
         elif group_uuid_pattern.match(i['uuid']):
-            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time, self.storage_classes)
+            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                    i, self._poll, self._poll_time, self.storage_classes)
         elif link_uuid_pattern.match(i['uuid']):
             if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
-                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
             else:
                 return None
         elif uuid_pattern.match(i['uuid']):
@@ -1022,6 +1041,8 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def writable(self):
+        if not self._enable_write:
+            return False
         with llfuse.lock_released:
             if not self._current_user:
                 self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
@@ -1033,6 +1054,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         try:
             with llfuse.lock_released:
                 c = {
@@ -1053,6 +1077,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if name not in self:
             raise llfuse.FUSEError(errno.ENOENT)
         if not isinstance(self[name], CollectionDirectory):
@@ -1066,6 +1093,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, ProjectDirectory):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -1138,9 +1168,9 @@ class ProjectDirectory(Directory):
 class SharedDirectory(Directory):
     """A special directory that represents users or groups who have shared projects with me."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
                  poll=False, poll_time=60, storage_classes=None):
-        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1231,7 +1261,8 @@ class SharedDirectory(Directory):
             self.merge(contents.items(),
                        lambda i: i[0],
                        lambda a, i: a.uuid() == i[1]['uuid'],
-                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                                  i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
         except Exception:
             _logger.exception("arv-mount shared dir error")
         finally:
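
Taken together, the fusedir.py changes make effective writability the conjunction of the mount-wide flag and the object's own state: with enable_write=False nothing is writable, and with enable_write=True the previous per-object logic applies unchanged. Restating the new CollectionDirectory.writable() from the hunks above, with the outcomes spelled out:

    def writable(self):
        # enable_write   object state               result
        # False          anything                   False
        # True           collection not writable    False
        # True           collection writable        True
        return self._enable_write and (
            self.collection.writable() if self.collection is not None
            else self._writable)
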
index 116b5462b6857aa3452ae59407af309cdaabe36b..45d3db16fe00d7edb802f8d279334b312d8fcc48 100644 (file)
@@ -50,11 +50,12 @@ class File(FreshBase):
 class FuseArvadosFile(File):
     """Wraps a ArvadosFile."""
 
-    __slots__ = ('arvfile',)
+    __slots__ = ('arvfile', '_enable_write')
 
-    def __init__(self, parent_inode, arvfile, _mtime):
+    def __init__(self, parent_inode, arvfile, _mtime, enable_write):
         super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
         self.arvfile = arvfile
+        self._enable_write = enable_write
 
     def size(self):
         with llfuse.lock_released:
@@ -72,7 +73,7 @@ class FuseArvadosFile(File):
         return False
 
     def writable(self):
-        return self.arvfile.writable()
+        return self._enable_write and self.arvfile.writable()
 
     def flush(self):
         with llfuse.lock_released:
index fe2ff929dc25d13000d600b66c4a3e75d76aac27..7cf8aa373a9e3b215593d507da0bb216531cf8d4 100644 (file)
@@ -57,12 +57,15 @@ class MountTestBase(unittest.TestCase):
         llfuse.close()
 
     def make_mount(self, root_class, **root_kwargs):
+        enable_write = True
+        if 'enable_write' in root_kwargs:
+            enable_write = root_kwargs.pop('enable_write')
         self.operations = fuse.Operations(
             os.getuid(), os.getgid(),
             api_client=self.api,
-            enable_write=True)
+            enable_write=enable_write)
         self.operations.inodes.add_entry(root_class(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
         llfuse.init(self.operations, self.mounttmp, [])
         self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
         self.llfuse_thread.daemon = True
index 157f55e4a4be4ed035aeeeba6f5cfdb402c8a348..ece316193d4ee6a82cf04f6a685f09b0af453cf3 100644 (file)
@@ -1113,7 +1113,7 @@ class MagicDirApiError(FuseMagicTest):
 
 class SanitizeFilenameTest(MountTestBase):
     def test_sanitize_filename(self):
-        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, project_object=self.api.users().current().execute())
+        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
         acceptable = [
             "foo.txt",
             ".foo",
@@ -1293,3 +1293,25 @@ class StorageClassesTest(IntegrationTest):
     @staticmethod
     def _test_collection_custom_storage_classes(self, coll):
         self.assertEqual(storage_classes_desired(coll), ['foo'])
+
+def _readonlyCollectionTestHelper(mounttmp):
+    f = open(os.path.join(mounttmp, 'thing1.txt'), 'rt')
+    # Testing that close() doesn't raise an error.
+    f.close()
+
+class ReadonlyCollectionTest(MountTestBase):
+    def setUp(self):
+        super(ReadonlyCollectionTest, self).setUp()
+        cw = arvados.collection.Collection()
+        with cw.open('thing1.txt', 'wt') as f:
+            f.write("data 1")
+        cw.save_new(owner_uuid=run_test_server.fixture("groups")["aproject"]["uuid"])
+        self.testcollection = cw.api_response()
+
+    def runTest(self):
+        settings = arvados.config.settings().copy()
+        settings["ARVADOS_API_TOKEN"] = run_test_server.fixture("api_client_authorizations")["project_viewer"]["api_token"]
+        self.api = arvados.safeapi.ThreadSafeApiCache(settings)
+        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)
+
+        self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))
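
The new ReadonlyCollectionTest only exercises the read path (open and close succeed through a mount created with enable_write=False). A natural companion check, sketched here as an assumption rather than as part of this patch, would assert that opening the same file for writing is refused:

    import errno
    import os

    def _readonlyWriteTestHelper(mounttmp):
        # Hypothetical helper (not in this patch): a read-only mount should
        # refuse to open an existing file for writing.
        try:
            open(os.path.join(mounttmp, 'thing1.txt'), 'wt')
        except OSError as e:
            assert e.errno in (errno.EROFS, errno.EACCES, errno.EPERM)
        else:
            assert False, "expected write open to fail on a read-only mount"
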
index a74616604814488ed9c54fa3e5026f2d4f37c907..f076ccf18419675499e12eed0e3d017824af8e57 100644 (file)
@@ -135,6 +135,7 @@ func (v *UnixVolume) GetDeviceID() string {
        if err != nil {
                return giveup("opening %q: %s", udir, err)
        }
+       defer d.Close()
        uuids, err := d.Readdirnames(0)
        if err != nil {
                return giveup("reading %q: %s", udir, err)
@@ -274,29 +275,25 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
                return fmt.Errorf("error creating directory %s: %s", bdir, err)
        }
 
-       tmpfile, tmperr := v.os.TempFile(bdir, "tmp"+loc)
-       if tmperr != nil {
-               return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, tmperr)
-       }
-
        bpath := v.blockPath(loc)
+       tmpfile, err := v.os.TempFile(bdir, "tmp"+loc)
+       if err != nil {
+               return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, err)
+       }
+       defer v.os.Remove(tmpfile.Name())
+       defer tmpfile.Close()
 
-       if err := v.lock(ctx); err != nil {
+       if err = v.lock(ctx); err != nil {
                return err
        }
        defer v.unlock()
        n, err := io.Copy(tmpfile, rdr)
        v.os.stats.TickOutBytes(uint64(n))
        if err != nil {
-               err = fmt.Errorf("error writing %s: %s", bpath, err)
-               tmpfile.Close()
-               v.os.Remove(tmpfile.Name())
-               return err
+               return fmt.Errorf("error writing %s: %s", bpath, err)
        }
-       if err := tmpfile.Close(); err != nil {
-               err = fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
-               v.os.Remove(tmpfile.Name())
-               return err
+       if err = tmpfile.Close(); err != nil {
+               return fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
        }
        // ext4 uses a low-precision clock and effectively backdates
        // files by up to 10 ms, sometimes across a 1-second boundary,
@@ -307,14 +304,10 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
        v.os.stats.TickOps("utimes")
        v.os.stats.Tick(&v.os.stats.UtimesOps)
        if err = os.Chtimes(tmpfile.Name(), ts, ts); err != nil {
-               err = fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
-               v.os.Remove(tmpfile.Name())
-               return err
+               return fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
        }
-       if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
-               err = fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
-               v.os.Remove(tmpfile.Name())
-               return err
+       if err = v.os.Rename(tmpfile.Name(), bpath); err != nil {
+               return fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
        }
        return nil
 }
index 36a33376a1892fb88f5458e18a1d3f59484c27e0..ca98ff63f1492a55e7b4d03ad3442f3fd37b8665 100755 (executable)
@@ -400,6 +400,9 @@ build() {
     fi
     set -e
 
+    # Get the go version we should use for bootstrapping
+    GO_VERSION=`grep 'const goversion =' $LOCAL_ARVADOS_ROOT/lib/install/deps.go |awk -F'"' '{print $2}'`
+
     if test "$1" = localdemo -o "$1" = publicdemo ; then
         BUILDTYPE=demo
     else
@@ -411,6 +414,7 @@ build() {
     fi
 
     docker build --build-arg=BUILDTYPE=$BUILDTYPE $NO_CACHE \
+          --build-arg=go_version=$GO_VERSION \
           --build-arg=arvados_version=$ARVADOS_BRANCH \
           --build-arg=workbench2_version=$WORKBENCH2_BRANCH \
           --build-arg=workdir=/tools/arvbox/lib/arvbox/docker \
@@ -419,6 +423,7 @@ build() {
           "$LOCAL_ARVADOS_ROOT"
     docker tag $FORCE arvados/arvbox-base:$GITHEAD arvados/arvbox-base:latest
     docker build $NO_CACHE \
+          --build-arg=go_version=$GO_VERSION \
           --build-arg=arvados_version=$ARVADOS_BRANCH \
           --build-arg=workbench2_version=$WORKBENCH2_BRANCH \
           -t arvados/arvbox-$BUILDTYPE:$GITHEAD \
index 27757be649d7f8ee015aed39145846e3c1d5f3c9..c93c1a10a16438e0ee5945cd3356c1cc14b894ef 100644 (file)
@@ -21,15 +21,16 @@ RUN apt-get update && \
     build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
+ARG go_version
 
-# Get Go 1.16.9
+# Get Go
 RUN cd /usr/src && \
-    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
-    tar xzf go1.16.9.linux-amd64.tar.gz && \
-    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
-    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
-    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
-    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+    wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+    tar xzf go${go_version}.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+    ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
 
 # the --mount option requires the experimental syntax enabled (enables
 # buildkit) on the first line of this file. This Dockerfile must also be built
@@ -49,15 +50,15 @@ RUN apt-get update && \
     build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
+ARG go_version
 
-# Get Go 1.16.9
 RUN cd /usr/src && \
-    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
-    tar xzf go1.16.9.linux-amd64.tar.gz && \
-    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
-    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
-    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
-    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+    wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+    tar xzf go${go_version}.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+    ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
 
 ARG arvados_version
 RUN echo arvados_version is git commit $arvados_version