.Rproj.user
_version.py
*.bak
+*.log
arvados-snakeoil-ca.pem
.vagrant
mime-types-data (~> 3.2015)
mime-types-data (3.2019.0331)
mini_mime (1.1.0)
- mini_portile2 (2.5.3)
+ mini_portile2 (2.6.1)
minitest (5.10.3)
mocha (1.8.0)
metaclass (~> 0.0.1)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.5.7)
- nokogiri (1.11.7)
- mini_portile2 (~> 2.5.0)
+ nokogiri (1.12.5)
+ mini_portile2 (~> 2.6.1)
racc (~> 1.4)
npm-rails (0.2.1)
rails (>= 3.2)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
public_suffix (4.0.6)
- racc (1.5.2)
+ racc (1.6.0)
rack (2.2.3)
rack-mini-profiler (1.0.2)
rack (>= 1.2.0)
opts.TargetOS,
"bash", "-c", `
set -e -o pipefail
-apt-get update
+apt-get --allow-releaseinfo-change update
apt-get install -y --no-install-recommends dpkg-dev eatmydata
mkdir /tmp/pkg
ln -s /pkg/*.deb /tmp/pkg/
(cd /tmp/pkg; dpkg-scanpackages --multiversion . | gzip > Packages.gz)
echo >/etc/apt/sources.list.d/arvados-local.list "deb [trusted=yes] file:///tmp/pkg ./"
-apt-get update
+apt-get --allow-releaseinfo-change update
eatmydata apt-get install -y --no-install-recommends arvados-server-easy postgresql
eatmydata apt-get remove -y dpkg-dev
"bash", "-c", `
set -e -o pipefail
PATH="/var/lib/arvados/bin:$PATH"
-apt-get update
+apt-get --allow-releaseinfo-change update
apt-get install -y --no-install-recommends dpkg-dev
mkdir /tmp/pkg
ln -s /pkg/*.deb /tmp/pkg/
echo
echo >/etc/apt/sources.list.d/arvados-local.list "deb [trusted=yes] file:///tmp/pkg ./"
-apt-get update
+apt-get --allow-releaseinfo-change update
eatmydata apt-get install --reinstall -y --no-install-recommends arvados-server-easy`+versionsuffix+`
SUDO_FORCE_REMOVE=yes apt-get autoremove -y
Dir.chdir(".site") do
`which linkchecker`
if $? == 0
- system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
+ # we need --check-extern to check relative links, weird but true
+ system "linkchecker index.html --check-extern --ignore-url='!file://'" or exit $?.exitstatus
else
puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
end
- admin/keep-balance.html.textile.liquid
- admin/controlling-container-reuse.html.textile.liquid
- admin/logs-table-management.html.textile.liquid
- - admin/workbench2-vocabulary.html.textile.liquid
+ - admin/metadata-vocabulary.html.textile.liquid
- admin/storage-classes.html.textile.liquid
- admin/keep-recovering-data.html.textile.liquid
- admin/keep-measuring-deduplication.html.textile.liquid
-{
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}{
"strict_tags": false,
"tags": {
"IDTAGANIMALS": {
{% endcomment %}
{% include 'notebox_begin' %}
-This tutorial assumes that you have access to the "Arvados command line tools":/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
+This tutorial assumes that you have access to the "Arvados command line tools":{{ site.baseurl }}/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
{% include 'notebox_end' %}
---
layout: default
navsection: admin
-title: User properties vocabulary
+title: Metadata vocabulary
...
{% comment %}
Many Arvados objects (like collections and projects) can store metadata as properties that in turn can be used in searches allowing a flexible way of organizing data inside the system.
-The Workbench2 user interface enables the site adminitrator to set up a properties vocabulary formal definition so that users can select from predefined key/value pairs of properties, offering the possibility to add different terms for the same concept.
+Arvados enables the site administrator to set up a formal metadata vocabulary definition so that users can select from predefined key/value pairs of properties, offering the possibility to add different terms for the same concept on clients' UI such as Workbench2.
-h2. Workbench2 configuration
+The Controller service loads and caches the configured vocabulary file in memory at startup time, exporting it on a particular endpoint. From time to time, it'll check for updates in the local copy and refresh its cache if validation passes.
-Workbench2 retrieves the vocabulary file URL from the cluster config as shown:
+h2. Configuration
+
+The site administrator should place the JSON vocabulary file on the same host as the controller service and set up the config file as follows:
<notextile>
<pre><code>Cluster:
zzzzz:
- Workbench:
- VocabularyURL: <span class="userinput">https://site.example.com/vocabulary.json</span>
+ API:
+ VocabularyPath: <span class="userinput">/etc/arvados/vocabulary.json</span>
</code></pre>
</notextile>
The following is an example of a vocabulary definition:
{% codeblock as json %}
-{% include 'wb2_vocabulary_example' %}
+{% include 'metadata_vocabulary_example' %}
{% endcodeblock %}
-If the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. Take notice that this restriction is at the client level on Workbench2, it doesn't limit the user's ability to set any arbitrary property via other means (e.g. Python SDK or CLI commands)
+For clients to be able to query the vocabulary definition, a special endpoint is exposed on the @controller@ service: @/arvados/v1/vocabulary@. This endpoint doesn't require authentication and returns the vocabulary definition in JSON format.
+
+If the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. This restriction is enforced at the backend level to ensure consistency across different clients.
Inside the @tags@ member, IDs are defined (@IDTAGANIMALS@, @IDTAGCOMMENT@, @IDTAGIMPORTANCES@) and can have any format that the current application requires. Every key will declare at least a @labels@ list with zero or more label objects.
<div class="releasenotes">
</notextile>
-h2(#main). development main (as of 2021-10-27)
+h2(#main). development main (as of 2021-11-10)
"previous: Upgrading from 2.3.0":#v2_3_0
+h3. Users are visible to other users by default
+
+When a new user is set up (either via @AutoSetupNewUsers@ config or via Workbench admin interface) the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.
+
h3. Dedicated keepstore process for each container
When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
* If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.
* This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.
+h3. Backend support for vocabulary checking
+
+If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and set the @API.VocabularyPath@ configuration parameter to the local path where the file was placed.
+This will enable the vocabulary checking cluster-wide, including Workbench2. The @Workbench.VocabularyURL@ configuration parameter is deprecated and will be removed in a future release.
+You can read more about how this feature works on the "admin page":{{site.baseurl}}/admin/metadata-vocabulary.html.
+
h2(#v2_3_0). v2.3.0 (2021-10-27)
"previous: Upgrading to 2.2.0":#v2_2_0
h3. New property vocabulary format for Workbench2
-(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format and it isn't compatible with the previous one, please read the "workbench2 vocabulary format admin page":{{site.baseurl}}/admin/workbench2-vocabulary.html for more information.
+(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format and it isn't compatible with the previous one, please read the "metadata vocabulary format admin page":{{site.baseurl}}/admin/metadata-vocabulary.html for more information.
h3. Cloud installations only: node manager replaced by arvados-dispatch-cloud
As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.
-One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
+One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
Python scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment.
This installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 2, below.
-First, configure the "Arvados package repositories":../../install/packages.html
+First, configure the "Arvados package repositories":{{ site.baseurl }}/install/packages.html
{% assign arvados_component = 'python3-arvados-user-activity' %}
Step 3: Run @pip install .@ in an appropriate installation environment, such as a @virtualenv@.
-Note: depends on the "Arvados Python SDK":../sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
+Note: depends on the "Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
h2. Usage
If a User has *can_manage* permission on some object, the user has the ability to read, create, update and delete permission links with @head_uuid@ of the managed object. In other words, the user has the ability to modify the permission grants on the object.
-The *can_login* @name@ is only meaningful on a permission link with with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the CLI cheat sheet":/install/cheat_sheet.html#vm-login for an example.
+The *can_login* @name@ is only meaningful on a permission link with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the 'User management at the CLI' page":{{ site.baseurl }}/admin/user-management-cli.html#vm-login for an example.
h3. Transitive permissions
* The name of a project is unique only among projects and filters with the same owner_uuid.
* Projects can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@). Putting a project in a @tail_uuid@ field is an error.
-A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. See "the groups API documentation":/api/methods/groups.html for more information.
+A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount@. See "the groups API documentation":{{ site.baseurl }}/api/methods/groups.html for more information.
* A filter group cannot own things (cannot appear in @owner_uuid@). Putting a filter group in an @owner_uuid@ field is an error.
* A filter group can be owned by a user or a project.
* The name of a filter is unique only among projects and filters with the same owner_uuid.
Arvados @projects@ are used to organize objects. Projects can contain @collections@, @container requests@, @workflows@, etc. Projects can also contain other projects. An object is part of a project if the @owner_uuid@ of the object is set to the uuid of the project.
-Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":/api/methods/groups.html.
+Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":{{ site.baseurl }}/api/methods/groups.html.
Projects can be manipulated via Workbench, the cli tools, the SDKs, and the Arvados APIs.
h3. Keep clients for data access
In order to access data in Keep, a client is needed to store data in and retrieve data from Keep. Different types of Keep clients exist:
-* a command line client like "@arv-get@":/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":/user/tutorials/tutorial-keep.html#upload-using-command
-* a FUSE mount provided by "@arv-mount@":/user/tutorials/tutorial-keep-mount-gnu-linux.html
+* a command line client like "@arv-get@":{{ site.baseurl }}/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":{{ site.baseurl }}/user/tutorials/tutorial-keep.html#upload-using-command
+* a FUSE mount provided by "@arv-mount@":{{ site.baseurl }}/user/tutorials/tutorial-keep-mount-gnu-linux.html
* a WebDAV mount provided by @keep-web@
* an S3-compatible endpoint provided by @keep-web@
-* programmatic access via the "Arvados SDKs":/sdk/index.html
+* programmatic access via the "Arvados SDKs":{{ site.baseurl }}/sdk/index.html
-In essense, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":/architecture/keep-clients.html.
+In essence, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":{{ site.baseurl }}/architecture/keep-clients.html.
For example, when a request comes in to read a file from Keep, the client will
* request the collection object (including its manifest) from the API server
h3. API server
-The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
+The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":{{ site.baseurl }}/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
h3. Keepstore
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
h2. Prerequisites
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
h2. Prerequisites
* Minikube or Google Kubernetes Engine (Kubernetes 1.10+ with at least 3 nodes, 2+ cores per node)
* @kubectl@ and @Helm 3@ installed locally, and able to connect to your Kubernetes cluster
-Please refer to "Arvados on Minikube":/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
+Please refer to "Arvados on Minikube":{{ site.baseurl }}/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":{{ site.baseurl }}/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
{% endcomment %}
{% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
{% include 'notebox_end' %}
{% include 'notebox_begin_warning' %}
{% endcomment %}
{% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
{% include 'notebox_end' %}
{% include 'notebox_begin_warning' %}
h2. Vocabulary configuration (optional)
-Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections. To configure the property vocabulary definition, please visit the "Workbench2 Vocabulary Format":{{site.baseurl}}/admin/workbench2-vocabulary.html page in the Admin section.
+Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections. To configure the property vocabulary definition, please visit the "Metadata Vocabulary Format":{{site.baseurl}}/admin/metadata-vocabulary.html page in the Admin section.
{% assign arvados_component = 'arvados-workbench2' %}
# "Introduction":#introduction
# "Hosts preparation":#hosts_preparation
-## "Hosts setup using terraform (experimental)":#hosts_setup_using_terraform
## "Create a compute image":#create_a_compute_image
# "Multi host install using the provision.sh script":#multi_host
# "Choose the desired configuration":#choose_configuration
Again, if your infrastructure differs from the setup proposed above (i.e., using RDS or an existing DB server), remember that you will need to edit the configuration files for the scripts so they work with your infrastructure.
-
-h3(#hosts_setup_using_terraform). Hosts setup using terraform (AWS, experimental)
-
-We added a few "terraform":https://terraform.io/ scripts (https://github.com/arvados/arvados/tree/main/tools/terraform) to let you create these instances easier in an AWS account. Check "the Arvados terraform documentation":/doc/install/terraform.html for more details.
-
-
-
-
h2(#multi_host). Multi host install using the provision.sh script
{% include 'branchname' %}
h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
-You will need further customization to suit your environment, which can be done editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that can be retrieved as output of the terraform run.
+You will need further customization to suit your environment, which can be done editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that describes your environment.
Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the hosts.
Users can be identified by their email address or username: the tool will check if every user exists on the system, and report back when not found. Groups, on the other hand, are identified by their name.
-Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":/admin/group-management.html.
+Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":{{ site.baseurl }}/admin/group-management.html.
This tool is designed to be run periodically reading a file created by a remote auth system (i.e. LDAP) dump script, applying what's included on the file as the source of truth.
One is by "configuring (system-wide) the collection's idle time":{{site.baseurl}}/admin/collection-versioning.html. This idle time is checked against the @modified_at@ attribute so that the version is saved when one or more of the previously enumerated attributes get updated and the @modified_at@ is at least at the configured idle time in the past. This way, a frequently updated collection won't create lots of version records that may not be useful.
-The other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated.
+The other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated. This includes either creating a new collection or updating a preexisting one. In the case of using @preserve_version = true@ on a collection's create call, the new record state will be preserved as a snapshot on the next update.
h3. Collection's past versions behavior & limitations
# Timeout on requests to internal Keep services.
KeepServiceRequestTimeout: 15s
+ # Vocabulary file path, local to the node running the controller.
+ # This JSON file should contain the description of what's allowed
+ # as object's metadata. Its format is described at:
+ # https://doc.arvados.org/admin/metadata-vocabulary.html
+ VocabularyPath: ""
+
Users:
# Config parameters to automatically setup new users. If enabled,
# these users will be able to self-activate. Enable this if you want
# user agreements. Should only be enabled for development.
NewUsersAreActive: false
+ # Newly activated users (whether set up by an admin or via
+ # AutoSetupNewUsers) immediately become visible to other active
+ # users.
+ #
+ # On a multi-tenant cluster, where the intent is for users to be
+ # invisible to one another unless they have been added to the
+ # same group(s) via Workbench admin interface, change this to
+ # false.
+ ActivatedUsersAreVisibleToOthers: true
+
# The e-mail address of the user you would like to become marked as an admin
# user on their first login.
AutoAdminUserWithEmail: ""
DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
# Workbench2 configs
- VocabularyURL: ""
FileViewersConfigURL: ""
# Idle time after which the user's session will be auto closed.
"API.MaxTokenLifetime": false,
"API.RequestTimeout": true,
"API.SendTimeout": true,
+ "API.VocabularyPath": false,
"API.WebsocketClientEventQueue": false,
"API.WebsocketServerEventQueue": false,
"AuditLogs": false,
"Collections.BlobTrashCheckInterval": false,
"Collections.BlobTrashConcurrency": false,
"Collections.BlobTrashLifetime": false,
- "Collections.CollectionVersioning": false,
+ "Collections.CollectionVersioning": true,
"Collections.DefaultReplication": true,
"Collections.DefaultTrashLifetime": true,
"Collections.ForwardSlashNameSubstitution": true,
"SystemRootToken": false,
"TLS": false,
"Users": true,
+ "Users.ActivatedUsersAreVisibleToOthers": false,
"Users.AdminNotifierEmailFrom": false,
"Users.AnonymousUserToken": true,
"Users.AutoAdminFirstUser": false,
"Workbench.UserProfileFormFields.*.*": true,
"Workbench.UserProfileFormFields.*.*.*": true,
"Workbench.UserProfileFormMessage": true,
- "Workbench.VocabularyURL": true,
"Workbench.WelcomePageHTML": true,
}
# Timeout on requests to internal Keep services.
KeepServiceRequestTimeout: 15s
+ # Vocabulary file path, local to the node running the controller.
+ # This JSON file should contain the description of what's allowed
+ # as object's metadata. Its format is described at:
+ # https://doc.arvados.org/admin/metadata-vocabulary.html
+ VocabularyPath: ""
+
Users:
# Config parameters to automatically setup new users. If enabled,
# these users will be able to self-activate. Enable this if you want
# user agreements. Should only be enabled for development.
NewUsersAreActive: false
+ # Newly activated users (whether set up by an admin or via
+ # AutoSetupNewUsers) immediately become visible to other active
+ # users.
+ #
+ # On a multi-tenant cluster, where the intent is for users to be
+ # invisible to one another unless they have been added to the
+ # same group(s) via Workbench admin interface, change this to
+ # false.
+ ActivatedUsersAreVisibleToOthers: true
+
# The e-mail address of the user you would like to become marked as an admin
# user on their first login.
AutoAdminUserWithEmail: ""
DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
# Workbench2 configs
- VocabularyURL: ""
FileViewersConfigURL: ""
# Idle time after which the user's session will be auto closed.
mux.ServeHTTP(w, req)
})
-
- return mux
}
type CurrentUser struct {
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/health"
)
type Conn struct {
remotes map[string]backend
}
-func New(cluster *arvados.Cluster) *Conn {
+func New(cluster *arvados.Cluster, healthFuncs *map[string]health.Func) *Conn {
local := localdb.NewConn(cluster)
remotes := map[string]backend{}
for id, remote := range cluster.RemoteClusters {
if !remote.Proxy || id == cluster.ClusterID {
continue
}
- conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(local, id))
+ conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(cluster, local, id))
// Older versions of controller rely on the Via header
// to detect loops.
conn.SendHeader = http.Header{"Via": {"HTTP/1.1 arvados-controller"}}
remotes[id] = conn
}
+ if healthFuncs != nil {
+ hf := map[string]health.Func{"vocabulary": local.LastVocabularyError}
+ *healthFuncs = hf
+ }
+
return &Conn{
cluster: cluster,
local: local,
// tokens from an incoming request context, determines whether they
// should (and can) be salted for the given remoteID, and returns the
// resulting tokens.
-func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
+func saltedTokenProvider(cluster *arvados.Cluster, local backend, remoteID string) rpc.TokenProvider {
return func(ctx context.Context) ([]string, error) {
var tokens []string
incoming, ok := auth.FromContext(ctx)
return nil, errors.New("no token provided")
}
for _, token := range incoming.Tokens {
+ if strings.HasPrefix(token, "v2/"+cluster.ClusterID+"-") && remoteID == cluster.Login.LoginCluster {
+ // If we did this, the login cluster
+ // would call back to us and then
+ // reject our response because the
+ // user UUID prefix (i.e., the
+ // LoginCluster prefix) won't match
+ // the token UUID prefix (i.e., our
+ // prefix).
+ return nil, httpErrorf(http.StatusUnauthorized, "cannot use a locally issued token to forward a request to our login cluster (%s)", remoteID)
+ }
salted, err := auth.SaltToken(token, remoteID)
switch err {
case nil:
return json.RawMessage(buf.Bytes()), err
}
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+ return conn.chooseBackend(conn.cluster.ClusterID).VocabularyGet(ctx)
+}
+
func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
// defer entire login procedure to designated cluster
return conn.chooseBackend(options.UUID).GroupUntrash(ctx, options)
}
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+ return conn.chooseBackend(options.ClusterID).LinkCreate(ctx, options)
+}
+
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+ return conn.chooseBackend(options.UUID).LinkUpdate(ctx, options)
+}
+
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+ return conn.chooseBackend(options.UUID).LinkGet(ctx, options)
+}
+
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+ return conn.generated_LinkList(ctx, options)
+}
+
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+ return conn.chooseBackend(options.UUID).LinkDelete(ctx, options)
+}
+
func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
return conn.generated_SpecimenList(ctx, options)
}
ctx = ctrlctx.NewWithTransaction(ctx, s.tx)
s.ctx = ctx
- s.fed = New(s.cluster)
+ s.fed = New(s.cluster, nil)
}
func (s *FederationSuite) TearDownTest(c *check.C) {
Host: srv.Addr,
Proxy: true,
}
- s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.fed.local, id))
+ s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.cluster, s.fed.local, id))
}
defer out.Close()
out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
- for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User"} {
+ for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User", "Link"} {
_, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
if err != nil {
panic(err)
}
return merged, err
}
+
+func (conn *Conn) generated_LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+ var mtx sync.Mutex
+ var merged arvados.LinkList
+ var needSort atomic.Value
+ needSort.Store(false)
+ err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+ options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+ cl, err := backend.LinkList(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ if len(merged.Items) == 0 {
+ merged = cl
+ } else if len(cl.Items) > 0 {
+ merged.Items = append(merged.Items, cl.Items...)
+ needSort.Store(true)
+ }
+ uuids := make([]string, 0, len(cl.Items))
+ for _, item := range cl.Items {
+ uuids = append(uuids, item.UUID)
+ }
+ return uuids, nil
+ })
+ if needSort.Load().(bool) {
+ // Apply the default/implied order, "modified_at desc"
+ sort.Slice(merged.Items, func(i, j int) bool {
+ mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+ return mj.Before(mi)
+ })
+ }
+ if merged.Items == nil {
+ // Return empty results as [], not null
+ // (https://github.com/golang/go/issues/27589 might be
+ // a better solution in the future)
+ merged.Items = []arvados.Link{}
+ }
+ return merged, err
+}
s.cluster.Login.LoginCluster = "zhome"
// s.fed is already set by SetUpTest, but we need to
// reinitialize with the above config changes.
- s.fed = New(s.cluster)
+ s.fed = New(s.cluster, nil)
returnTo := "https://app.example.com/foo?bar"
for _, trial := range []struct {
func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster)
+ s.fed = New(s.cluster, nil)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
for _, updateFail := range []bool{false, true} {
func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster)
+ s.fed = New(s.cluster, nil)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
opts := arvados.GetOptions{UUID: "zzzzz-tpzed-xurymjxw79nv3jz", Select: []string{"uuid", "email"}}
func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster)
+ s.fed = New(s.cluster, nil)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
true, rpc.PassthroughTokenProvider))
"errors"
"fmt"
"net/http"
+ "net/http/httptest"
"net/url"
"strings"
"sync"
return err
}
_, _, err = railsproxy.FindRailsAPI(h.Cluster)
- return err
+ if err != nil {
+ return err
+ }
+ if h.Cluster.API.VocabularyPath != "" {
+ req, err := http.NewRequest("GET", "/arvados/v1/vocabulary", nil)
+ if err != nil {
+ return err
+ }
+ var resp httptest.ResponseRecorder
+ h.handlerStack.ServeHTTP(&resp, req)
+ if resp.Result().StatusCode != http.StatusOK {
+ return fmt.Errorf("%d %s", resp.Result().StatusCode, resp.Result().Status)
+ }
+ }
+ return nil
}
func (h *Handler) Done() <-chan struct{} {
func (h *Handler) setup() {
mux := http.NewServeMux()
- mux.Handle("/_health/", &health.Handler{
- Token: h.Cluster.ManagementToken,
- Prefix: "/_health/",
- Routes: health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }},
- })
+ healthFuncs := make(map[string]health.Func)
oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
- rtr := router.New(federation.New(h.Cluster), router.Config{
+ rtr := router.New(federation.New(h.Cluster, &healthFuncs), router.Config{
MaxRequestSize: h.Cluster.API.MaxRequestSize,
WrapCalls: api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
})
+
+ healthRoutes := health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }}
+ for name, f := range healthFuncs {
+ healthRoutes[name] = f
+ }
+ mux.Handle("/_health/", &health.Handler{
+ Token: h.Cluster.ManagementToken,
+ Prefix: "/_health/",
+ Routes: healthRoutes,
+ })
mux.Handle("/arvados/v1/config", rtr)
+ mux.Handle("/arvados/v1/vocabulary", rtr)
mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr) // must come before .../users/
mux.Handle("/arvados/v1/collections", rtr)
mux.Handle("/arvados/v1/collections/", rtr)
mux.Handle("/arvados/v1/container_requests/", rtr)
mux.Handle("/arvados/v1/groups", rtr)
mux.Handle("/arvados/v1/groups/", rtr)
+ mux.Handle("/arvados/v1/links", rtr)
+ mux.Handle("/arvados/v1/links/", rtr)
mux.Handle("/login", rtr)
mux.Handle("/logout", rtr)
}
}
+func (s *HandlerSuite) TestVocabularyExport(c *check.C) {
+ voc := `{
+ "strict_tags": false,
+ "tags": {
+ "IDTAGIMPORTANCE": {
+ "strict": false,
+ "labels": [{"label": "Importance"}],
+ "values": {
+ "HIGH": {
+ "labels": [{"label": "High"}]
+ },
+ "LOW": {
+ "labels": [{"label": "Low"}]
+ }
+ }
+ }
+ }
+ }`
+ f, err := os.CreateTemp("", "test-vocabulary-*.json")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(f.Name())
+ _, err = f.WriteString(voc)
+ c.Assert(err, check.IsNil)
+ f.Close()
+ s.cluster.API.VocabularyPath = f.Name()
+ for _, method := range []string{"GET", "OPTIONS"} {
+ c.Log(c.TestName()+" ", method)
+ req := httptest.NewRequest(method, "/arvados/v1/vocabulary", nil)
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Log(resp.Body.String())
+ if !c.Check(resp.Code, check.Equals, http.StatusOK) {
+ continue
+ }
+ c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, `*`)
+ c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Matches, `.*\bGET\b.*`)
+ c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Matches, `.+`)
+ if method == "OPTIONS" {
+ c.Check(resp.Body.String(), check.HasLen, 0)
+ continue
+ }
+ var expectedVoc, receivedVoc *arvados.Vocabulary
+ err := json.Unmarshal([]byte(voc), &expectedVoc)
+ c.Check(err, check.IsNil)
+ err = json.Unmarshal(resp.Body.Bytes(), &receivedVoc)
+ c.Check(err, check.IsNil)
+ c.Check(receivedVoc, check.DeepEquals, expectedVoc)
+ }
+}
+
+func (s *HandlerSuite) TestVocabularyFailedCheckStatus(c *check.C) {
+ voc := `{
+ "strict_tags": false,
+ "tags": {
+ "IDTAGIMPORTANCE": {
+ "strict": true,
+ "labels": [{"label": "Importance"}],
+ "values": {
+ "HIGH": {
+ "labels": [{"label": "High"}]
+ },
+ "LOW": {
+ "labels": [{"label": "Low"}]
+ }
+ }
+ }
+ }
+ }`
+ f, err := os.CreateTemp("", "test-vocabulary-*.json")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(f.Name())
+ _, err = f.WriteString(voc)
+ c.Assert(err, check.IsNil)
+ f.Close()
+ s.cluster.API.VocabularyPath = f.Name()
+
+ req := httptest.NewRequest("POST", "/arvados/v1/collections",
+ strings.NewReader(`{
+ "collection": {
+ "properties": {
+ "IDTAGIMPORTANCE": "Critical"
+ }
+ }
+ }`))
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+ req.Header.Set("Content-type", "application/json")
+
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Log(resp.Body.String())
+ c.Assert(resp.Code, check.Equals, http.StatusBadRequest)
+ var jresp httpserver.ErrorResponse
+ err = json.Unmarshal(resp.Body.Bytes(), &jresp)
+ c.Check(err, check.IsNil)
+ c.Assert(len(jresp.Errors), check.Equals, 1)
+ c.Check(jresp.Errors[0], check.Matches, `.*tag value.*is not valid for key.*`)
+}
+
func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
resp := httptest.NewRecorder()
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
c.Assert(resp.Code, check.Equals, http.StatusOK,
- check.Commentf("Wasn't able to get data from the controller at %q", url))
+ check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
err = json.Unmarshal(resp.Body.Bytes(), &proxied)
c.Check(err, check.Equals, nil)
}
}
}
+
+// z3333 should not forward a locally-issued container runtime token,
+// associated with a z1111 user, to its login cluster z1111. z1111
+// would only call back to z3333 and then reject the response because
+// the user ID does not match the token prefix. See
+// dev.arvados.org/issues/18346
+func (s *IntegrationSuite) TestForwardRuntimeTokenToLoginCluster(c *check.C) {
+	db3, db3conn := s.dbConn(c, "z3333")
+	defer db3.Close()
+	defer db3conn.Close()
+	rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+	rootctx3, _, _ := s.testClusters["z3333"].RootClients()
+	conn1 := s.testClusters["z1111"].Conn()
+	conn3 := s.testClusters["z3333"].Conn()
+	userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+	user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
+	c.Assert(err, check.IsNil)
+	c.Logf("user1 %+v", user1)
+
+	imageColl, err := conn3.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+		"manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar\n",
+	}})
+	c.Assert(err, check.IsNil)
+	c.Logf("imageColl %+v", imageColl)
+
+	cr, err := conn3.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+		"state":           "Committed",
+		"command":         []string{"echo"},
+		"container_image": imageColl.PortableDataHash,
+		"cwd":             "/",
+		"output_path":     "/",
+		"priority":        1,
+		"runtime_constraints": arvados.RuntimeConstraints{
+			VCPUs: 1,
+			RAM:   1000000000,
+		},
+	}})
+	c.Assert(err, check.IsNil)
+	c.Logf("container request %+v", cr)
+	ctr, err := conn3.ContainerLock(rootctx3, arvados.GetOptions{UUID: cr.ContainerUUID})
+	c.Assert(err, check.IsNil)
+	c.Logf("container %+v", ctr)
+
+	// We could use conn3.ContainerAuth() here, but that API
+	// hasn't been added to sdk/go/arvados/api.go yet.
+	row := db3conn.QueryRowContext(context.Background(), `SELECT api_token from api_client_authorizations where uuid=$1`, ctr.AuthUUID)
+	c.Assert(row, check.NotNil)
+	var val sql.NullString
+	// Check the Scan error explicitly: a query failure would
+	// otherwise surface as a confusing "val.Valid != true" below.
+	err = row.Scan(&val)
+	c.Assert(err, check.IsNil)
+	c.Assert(val.Valid, check.Equals, true)
+	runtimeToken := "v2/" + ctr.AuthUUID + "/" + val.String
+	ctrctx, _, _ := s.testClusters["z3333"].ClientsWithToken(runtimeToken)
+	c.Logf("container runtime token %+v", runtimeToken)
+
+	_, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})
+	c.Assert(err, check.NotNil)
+	c.Check(err, check.ErrorMatches, `request failed: .* 401 Unauthorized: cannot use a locally issued token to forward a request to our login cluster \(z1111\)`)
+	c.Check(err, check.Not(check.ErrorMatches), `(?ms).*127\.0\.0\.11.*`)
+}
}
// CollectionCreate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Collection, error) {
+ err := conn.checkProperties(ctx, opts.Attrs["properties"])
+ if err != nil {
+ return arvados.Collection{}, err
+ }
if len(opts.Select) > 0 {
// We need to know IsTrashed and TrashAt to implement
// signing properly, even if the caller doesn't want
}
// CollectionUpdate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Collection, error) {
+ err := conn.checkProperties(ctx, opts.Attrs["properties"])
+ if err != nil {
+ return arvados.Collection{}, err
+ }
if len(opts.Select) > 0 {
// We need to know IsTrashed and TrashAt to implement
// signing properly, even if the caller doesn't want
s.railsSpy.Close()
}
+// setUpVocabulary installs the given vocabulary JSON (or a default test
+// vocabulary when empty) directly into the localdb cache, so tests
+// exercise vocabulary checking without a real vocabulary file on disk.
+// VocabularyPath is set to a dummy value ("foo") just so the checking
+// code path is enabled.
+func (s *CollectionSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.cluster.API.VocabularyPath = "foo"
+	s.localdb.vocabularyCache = voc
+}
+
+func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) {
+ s.setUpVocabulary(c, "")
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+ tests := []struct {
+ name string
+ props map[string]interface{}
+ success bool
+ }{
+ {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+ {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+ {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+ {"Empty properties", map[string]interface{}{}, true},
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+
+ coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+ Select: []string{"uuid", "properties"},
+ Attrs: map[string]interface{}{
+ "properties": tt.props,
+ }})
+ if tt.success {
+ c.Assert(err, check.IsNil)
+ c.Assert(coll.Properties, check.DeepEquals, tt.props)
+ } else {
+ c.Assert(err, check.NotNil)
+ }
+ }
+}
+
+func (s *CollectionSuite) TestCollectionUpdateWithProperties(c *check.C) {
+ s.setUpVocabulary(c, "")
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+ tests := []struct {
+ name string
+ props map[string]interface{}
+ success bool
+ }{
+ {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+ {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+ {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+ {"Empty properties", map[string]interface{}{}, true},
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+ coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{})
+ c.Assert(err, check.IsNil)
+ coll, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ UUID: coll.UUID,
+ Select: []string{"uuid", "properties"},
+ Attrs: map[string]interface{}{
+ "properties": tt.props,
+ }})
+ if tt.success {
+ c.Assert(err, check.IsNil)
+ c.Assert(coll.Properties, check.DeepEquals, tt.props)
+ } else {
+ c.Assert(err, check.NotNil)
+ }
+ }
+}
+
func (s *CollectionSuite) TestSignatures(c *check.C) {
ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
import (
"context"
+ "encoding/json"
"fmt"
+ "net/http"
+ "os"
"strings"
+ "time"
"git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/sirupsen/logrus"
)
type railsProxy = rpc.Conn
type Conn struct {
- cluster *arvados.Cluster
- *railsProxy // handles API methods that aren't defined on Conn itself
+ cluster *arvados.Cluster
+ *railsProxy // handles API methods that aren't defined on Conn itself
+ vocabularyCache *arvados.Vocabulary
+ vocabularyFileModTime time.Time
+ lastVocabularyRefreshCheck time.Time
+ lastVocabularyError error
loginController
}
func NewConn(cluster *arvados.Cluster) *Conn {
railsProxy := railsproxy.NewConn(cluster)
railsProxy.RedactHostInErrors = true
- var conn Conn
- conn = Conn{
+ conn := Conn{
cluster: cluster,
railsProxy: railsProxy,
}
return &conn
}
+// checkProperties validates a "properties" attribute (either a JSON
+// string or an already-decoded map) against the configured vocabulary.
+// A vocabulary violation is returned as a 400 error; a nil properties
+// value is accepted as-is.
+func (conn *Conn) checkProperties(ctx context.Context, properties interface{}) error {
+	if properties == nil {
+		return nil
+	}
+	var props map[string]interface{}
+	switch properties := properties.(type) {
+	case string:
+		err := json.Unmarshal([]byte(properties), &props)
+		if err != nil {
+			return err
+		}
+	case map[string]interface{}:
+		props = properties
+	default:
+		return fmt.Errorf("unexpected properties type %T", properties)
+	}
+	voc, err := conn.VocabularyGet(ctx)
+	if err != nil {
+		return err
+	}
+	err = voc.Check(props)
+	if err != nil {
+		// Reuse err instead of calling voc.Check twice, and pass
+		// it through "%v" -- the error text can contain arbitrary
+		// property keys/values, so it must not be used as a
+		// printf format string.
+		return httpErrorf(http.StatusBadRequest, "%v", err)
+	}
+	return nil
+}
+
+// maybeRefreshVocabularyCache stats the vocabulary file and reloads it
+// when its modification time is newer than the cached copy. Disk access
+// is throttled to at most once per second; within the throttle window
+// it returns nil without checking. Stat/load errors are also recorded
+// in conn.lastVocabularyError for the health check.
+// NOTE(review): the cache fields on conn are read and written here
+// without a lock -- confirm callers serialize access or add a mutex.
+func (conn *Conn) maybeRefreshVocabularyCache(logger logrus.FieldLogger) error {
+	if conn.lastVocabularyRefreshCheck.Add(time.Second).After(time.Now()) {
+		// Throttle the access to disk to at most once per second.
+		return nil
+	}
+	conn.lastVocabularyRefreshCheck = time.Now()
+	fi, err := os.Stat(conn.cluster.API.VocabularyPath)
+	if err != nil {
+		err = fmt.Errorf("couldn't stat vocabulary file %q: %v", conn.cluster.API.VocabularyPath, err)
+		conn.lastVocabularyError = err
+		return err
+	}
+	if fi.ModTime().After(conn.vocabularyFileModTime) {
+		err = conn.loadVocabularyFile()
+		if err != nil {
+			conn.lastVocabularyError = err
+			return err
+		}
+		conn.vocabularyFileModTime = fi.ModTime()
+		conn.lastVocabularyError = nil
+		logger.Info("vocabulary file reloaded successfully")
+	}
+	return nil
+}
+
+// loadVocabularyFile reads and parses the configured vocabulary file
+// (registering the cluster's managed property keys as reserved) and, on
+// success, replaces conn.vocabularyCache.
+func (conn *Conn) loadVocabularyFile() error {
+	vf, err := os.ReadFile(conn.cluster.API.VocabularyPath)
+	if err != nil {
+		// (fixed grammar: was "couldn't reading the vocabulary file")
+		return fmt.Errorf("couldn't read the vocabulary file: %v", err)
+	}
+	mk := make([]string, 0, len(conn.cluster.Collections.ManagedProperties))
+	for k := range conn.cluster.Collections.ManagedProperties {
+		mk = append(mk, k)
+	}
+	voc, err := arvados.NewVocabulary(vf, mk)
+	if err != nil {
+		return fmt.Errorf("while loading vocabulary file %q: %s", conn.cluster.API.VocabularyPath, err)
+	}
+	conn.vocabularyCache = voc
+	return nil
+}
+
+// LastVocabularyError returns the last error encountered while loading
+// or refreshing the vocabulary file.
+// Implements health.Func
+func (conn *Conn) LastVocabularyError() error {
+	// The refresh is throttled to once per second, so the result may
+	// reflect the previous load attempt. Any refresh error is stored
+	// in conn.lastVocabularyError, so the return value here is ignored.
+	conn.maybeRefreshVocabularyCache(ctxlog.FromContext(context.Background()))
+	return conn.lastVocabularyError
+}
+
+// VocabularyGet refreshes the vocabulary cache if necessary and returns it.
+// When no vocabulary file is configured, it returns an empty (permissive)
+// vocabulary.
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	if conn.cluster.API.VocabularyPath == "" {
+		return arvados.Vocabulary{
+			Tags: map[string]arvados.VocabularyTag{},
+		}, nil
+	}
+	logger := ctxlog.FromContext(ctx)
+	if conn.vocabularyCache == nil {
+		// Initial load of vocabulary file.
+		err := conn.loadVocabularyFile()
+		if err != nil {
+			// Record the failure so LastVocabularyError (the
+			// health check) reports it, consistent with the
+			// refresh path in maybeRefreshVocabularyCache.
+			conn.lastVocabularyError = err
+			logger.WithError(err).Error("error loading vocabulary file")
+			return arvados.Vocabulary{}, err
+		}
+		conn.lastVocabularyError = nil
+	}
+	err := conn.maybeRefreshVocabularyCache(logger)
+	if err != nil {
+		// Keep serving the last good vocabulary; the error is
+		// surfaced via the health check.
+		logger.WithError(err).Error("error reloading vocabulary file - ignoring")
+	}
+	return *conn.vocabularyCache, nil
+}
+
// Logout handles the logout of conn giving to the appropriate loginController
func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
return conn.loginController.Logout(ctx, opts)
return conn.railsProxy.GroupContents(ctx, options)
}
+
+// httpErrorf builds an error that carries the given HTTP status code,
+// so the controller's error handler responds with that status instead
+// of the default.
+func httpErrorf(code int, format string, args ...interface{}) error {
+	return httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// ContainerRequestCreate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.ContainerRequest, error) {
+	// Reject vocabulary violations before touching Rails.
+	if err := conn.checkProperties(ctx, opts.Attrs["properties"]); err != nil {
+		return arvados.ContainerRequest{}, err
+	}
+	return conn.railsProxy.ContainerRequestCreate(ctx, opts)
+}
+
+// ContainerRequestUpdate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+	// Reject vocabulary violations before touching Rails.
+	if err := conn.checkProperties(ctx, opts.Attrs["properties"]); err != nil {
+		return arvados.ContainerRequest{}, err
+	}
+	return conn.railsProxy.ContainerRequestUpdate(ctx, opts)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ContainerRequestSuite{})
+
+type ContainerRequestSuite struct {
+ cluster *arvados.Cluster
+ localdb *Conn
+ railsSpy *arvadostest.Proxy
+}
+
+func (s *ContainerRequestSuite) TearDownSuite(c *check.C) {
+ // Undo any changes/additions to the user database so they
+ // don't affect subsequent tests.
+ arvadostest.ResetEnv()
+ c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *ContainerRequestSuite) SetUpTest(c *check.C) {
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ s.cluster, err = cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ s.localdb = NewConn(s.cluster)
+ s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *ContainerRequestSuite) TearDownTest(c *check.C) {
+ s.railsSpy.Close()
+}
+
+func (s *ContainerRequestSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+ if testVocabulary == "" {
+ testVocabulary = `{
+ "strict_tags": false,
+ "tags": {
+ "IDTAGIMPORTANCES": {
+ "strict": true,
+ "labels": [{"label": "Importance"}, {"label": "Priority"}],
+ "values": {
+ "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+ "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+ "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+ }
+ }
+ }
+ }`
+ }
+ voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+ c.Assert(err, check.IsNil)
+ s.localdb.vocabularyCache = voc
+ s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
+ s.setUpVocabulary(c, "")
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+ tests := []struct {
+ name string
+ props map[string]interface{}
+ success bool
+ }{
+ {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+ {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+ {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+ {"Empty properties", map[string]interface{}{}, true},
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+
+ cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+ Select: []string{"uuid", "properties"},
+ Attrs: map[string]interface{}{
+ "command": []string{"echo", "foo"},
+ "container_image": "arvados/apitestfixture:latest",
+ "cwd": "/tmp",
+ "environment": map[string]string{},
+ "mounts": map[string]interface{}{
+ "/out": map[string]interface{}{
+ "kind": "tmp",
+ "capacity": 1000000,
+ },
+ },
+ "output_path": "/out",
+ "runtime_constraints": map[string]interface{}{
+ "vcpus": 1,
+ "ram": 2,
+ },
+ "properties": tt.props,
+ }})
+ if tt.success {
+ c.Assert(err, check.IsNil)
+ c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+ } else {
+ c.Assert(err, check.NotNil)
+ }
+ }
+}
+
+func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
+ s.setUpVocabulary(c, "")
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+ tests := []struct {
+ name string
+ props map[string]interface{}
+ success bool
+ }{
+ {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+ {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+ {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+ {"Empty properties", map[string]interface{}{}, true},
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+ cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "command": []string{"echo", "foo"},
+ "container_image": "arvados/apitestfixture:latest",
+ "cwd": "/tmp",
+ "environment": map[string]string{},
+ "mounts": map[string]interface{}{
+ "/out": map[string]interface{}{
+ "kind": "tmp",
+ "capacity": 1000000,
+ },
+ },
+ "output_path": "/out",
+ "runtime_constraints": map[string]interface{}{
+ "vcpus": 1,
+ "ram": 2,
+ },
+ },
+ })
+ c.Assert(err, check.IsNil)
+ cnt, err = s.localdb.ContainerRequestUpdate(ctx, arvados.UpdateOptions{
+ UUID: cnt.UUID,
+ Select: []string{"uuid", "properties"},
+ Attrs: map[string]interface{}{
+ "properties": tt.props,
+ }})
+ if tt.success {
+ c.Assert(err, check.IsNil)
+ c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+ } else {
+ c.Assert(err, check.NotNil)
+ }
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// GroupCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Group, error) {
+	// Reject vocabulary violations before touching Rails.
+	if err := conn.checkProperties(ctx, opts.Attrs["properties"]); err != nil {
+		return arvados.Group{}, err
+	}
+	return conn.railsProxy.GroupCreate(ctx, opts)
+}
+
+// GroupUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Group, error) {
+	// Reject vocabulary violations before touching Rails.
+	if err := conn.checkProperties(ctx, opts.Attrs["properties"]); err != nil {
+		return arvados.Group{}, err
+	}
+	return conn.railsProxy.GroupUpdate(ctx, opts)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GroupSuite{})
+
+type GroupSuite struct {
+ cluster *arvados.Cluster
+ localdb *Conn
+ railsSpy *arvadostest.Proxy
+}
+
+func (s *GroupSuite) TearDownSuite(c *check.C) {
+ // Undo any changes/additions to the user database so they
+ // don't affect subsequent tests.
+ arvadostest.ResetEnv()
+ c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *GroupSuite) SetUpTest(c *check.C) {
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ s.cluster, err = cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ s.localdb = NewConn(s.cluster)
+ s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *GroupSuite) TearDownTest(c *check.C) {
+ s.railsSpy.Close()
+}
+
+func (s *GroupSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+ if testVocabulary == "" {
+ testVocabulary = `{
+ "strict_tags": false,
+ "tags": {
+ "IDTAGIMPORTANCES": {
+ "strict": true,
+ "labels": [{"label": "Importance"}, {"label": "Priority"}],
+ "values": {
+ "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+ "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+ "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+ }
+ }
+ }
+ }`
+ }
+ voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+ c.Assert(err, check.IsNil)
+ s.localdb.vocabularyCache = voc
+ s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
+ s.setUpVocabulary(c, "")
+ ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+ tests := []struct {
+ name string
+ props map[string]interface{}
+ success bool
+ }{
+ {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+ {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+ {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+ {"Empty properties", map[string]interface{}{}, true},
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+
+ grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+ Select: []string{"uuid", "properties"},
+ Attrs: map[string]interface{}{
+ "group_class": "project",
+ "properties": tt.props,
+ }})
+ if tt.success {
+ c.Assert(err, check.IsNil)
+ c.Assert(grp.Properties, check.DeepEquals, tt.props)
+ } else {
+ c.Assert(err, check.NotNil)
+ }
+ }
+}
+
+// TestGroupUpdateWithProperties checks that GroupUpdate applies the
+// same vocabulary validation as create: each case first creates a
+// plain project group, then updates its properties and verifies the
+// expected accept/reject outcome.
+func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"group_class": "project",
+			},
+		})
+		c.Assert(err, check.IsNil)
+		grp, err = s.localdb.GroupUpdate(ctx, arvados.UpdateOptions{
+			UUID:   grp.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(grp.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// LinkCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Link, error) {
+	// Reject the request before proxying if the supplied properties
+	// don't pass vocabulary validation.
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Link{}, err
+	}
+	// The proxy's result and error can be returned directly; the
+	// previous explicit "if err != nil" branch returned the same
+	// values on both paths and was redundant.
+	return conn.railsProxy.LinkCreate(ctx, opts)
+}
+
+// LinkUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Link, error) {
+	// Reject the request before proxying if the supplied properties
+	// don't pass vocabulary validation.
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Link{}, err
+	}
+	// The proxy's result and error can be returned directly; the
+	// previous explicit "if err != nil" branch returned the same
+	// values on both paths and was redundant.
+	return conn.railsProxy.LinkUpdate(ctx, opts)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LinkSuite{})
+
+// LinkSuite exercises the link-related localdb methods against a
+// spied-on RailsAPI backend.
+type LinkSuite struct {
+	cluster  *arvados.Cluster   // test cluster config, loaded in SetUpTest
+	localdb  *Conn              // localdb connection under test
+	railsSpy *arvadostest.Proxy // proxy wrapping the RailsAPI service
+}
+
+// TearDownSuite restores the shared test environment after the whole
+// suite has run.
+func (s *LinkSuite) TearDownSuite(c *check.C) {
+	// Undo any changes/additions to the user database so they
+	// don't affect subsequent tests.
+	// NOTE(review): comment copied from the user suite; this suite
+	// creates links — confirm wording.
+	arvadostest.ResetEnv()
+	// Ask the API server to reset its test database to the initial
+	// fixture state.
+	c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+// SetUpTest loads the test cluster configuration and points the
+// localdb connection at a fresh RailsAPI spy proxy.
+func (s *LinkSuite) SetUpTest(c *check.C) {
+	cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+	c.Assert(err, check.IsNil)
+	s.cluster, err = cfg.GetCluster("")
+	c.Assert(err, check.IsNil)
+	s.localdb = NewConn(s.cluster)
+	s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+	// Route the localdb's rails proxy through the spy so tests can
+	// observe the requests sent to RailsAPI.
+	*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+// TearDownTest shuts down the RailsAPI spy proxy started in SetUpTest.
+func (s *LinkSuite) TearDownTest(c *check.C) {
+	s.railsSpy.Close()
+}
+
+// setUpVocabulary installs the given vocabulary definition (or a
+// default one, when testVocabulary is empty) directly into the localdb
+// vocabulary cache, and sets a non-empty VocabularyPath so vocabulary
+// checking is active.
+func (s *LinkSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.localdb.vocabularyCache = voc
+	// Any non-empty path enables property checking against the
+	// cached vocabulary; the file itself is never read here.
+	s.cluster.API.VocabularyPath = "foo"
+}
+
+// TestLinkCreateWithProperties checks that LinkCreate enforces
+// vocabulary validation on the "properties" attribute: invalid keys or
+// values are rejected, valid or empty properties succeed.
+func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+
+		lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"link_class": "star",
+				"tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+				"head_uuid":  arvadostest.FooCollection,
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
+// TestLinkUpdateWithProperties checks that LinkUpdate applies the same
+// vocabulary validation as create: each case first creates a plain
+// "star" link, then updates its properties and verifies the expected
+// accept/reject outcome.
+func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"link_class": "star",
+				"tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+				"head_uuid":  arvadostest.FooCollection,
+			},
+		})
+		c.Assert(err, check.IsNil)
+		lnk, err = s.localdb.LinkUpdate(ctx, arvados.UpdateOptions{
+			UUID:   lnk.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
func (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {
var rOpts responseOptions
switch opts := opts.(type) {
+ case *arvados.CreateOptions:
+ rOpts.Select = opts.Select
+ case *arvados.UpdateOptions:
+ rOpts.Select = opts.Select
case *arvados.GetOptions:
rOpts.Select = opts.Select
case *arvados.ListOptions:
return rtr.backend.ConfigGet(ctx)
},
},
+ {
+ arvados.EndpointVocabularyGet,
+ func() interface{} { return &struct{}{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.VocabularyGet(ctx)
+ },
+ },
{
arvados.EndpointLogin,
func() interface{} { return &arvados.LoginOptions{} },
return rtr.backend.GroupUntrash(ctx, *opts.(*arvados.UntrashOptions))
},
},
+ {
+ arvados.EndpointLinkCreate,
+ func() interface{} { return &arvados.CreateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LinkCreate(ctx, *opts.(*arvados.CreateOptions))
+ },
+ },
+ {
+ arvados.EndpointLinkUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LinkUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
+ {
+ arvados.EndpointLinkList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LinkList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
+ {
+ arvados.EndpointLinkGet,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LinkGet(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointLinkDelete,
+ func() interface{} { return &arvados.DeleteOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LinkDelete(ctx, *opts.(*arvados.DeleteOptions))
+ },
+ },
{
arvados.EndpointSpecimenCreate,
func() interface{} { return &arvados.CreateOptions{} },
func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
uuid := arvadostest.QueuedContainerUUID
token := arvadostest.ActiveTokenV2
+ // GET
for _, sel := range [][]string{
{"uuid", "command"},
{"uuid", "command", "uuid"},
_, hasMounts := resp["mounts"]
c.Check(hasMounts, check.Equals, false)
}
+ // POST & PUT
+ uuid = arvadostest.FooCollection
+ j, err := json.Marshal([]string{"uuid", "description"})
+ c.Assert(err, check.IsNil)
+ for _, method := range []string{"PUT", "POST"} {
+ desc := "Today is " + time.Now().String()
+ reqBody := "{\"description\":\"" + desc + "\"}"
+ var resp map[string]interface{}
+ var rr *httptest.ResponseRecorder
+ if method == "PUT" {
+ _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+ } else {
+ _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+ }
+ c.Check(rr.Code, check.Equals, http.StatusOK)
+ c.Check(resp["kind"], check.Equals, "arvados#collection")
+ c.Check(resp["uuid"], check.HasLen, 27)
+ c.Check(resp["description"], check.Equals, desc)
+ c.Check(resp["manifest_text"], check.IsNil)
+ }
}
func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {
return resp, err
}
+// VocabularyGet fetches the server's vocabulary definition via the
+// vocabulary endpoint.
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	ep := arvados.EndpointVocabularyGet
+	var resp arvados.Vocabulary
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, nil)
+	return resp, err
+}
+
func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
ep := arvados.EndpointLogin
var resp arvados.LoginResponse
return resp, err
}
+// LinkCreate issues a link-create request to the remote API server and
+// decodes the resulting link.
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkCreate
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+// LinkUpdate issues a link-update request to the remote API server and
+// decodes the resulting link.
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkUpdate
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+// LinkGet fetches a single link from the remote API server.
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkGet
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+// LinkList fetches a list of links from the remote API server.
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+	ep := arvados.EndpointLinkList
+	var resp arvados.LinkList
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+// LinkDelete deletes a link on the remote API server, returning the
+// deleted link record.
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkDelete
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
ep := arvados.EndpointSpecimenCreate
var resp arvados.Specimen
}
if pdhOnly {
- arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
+ // If we are only mounting collections by pdh, make
+ // sure we don't subscribe to websocket events to
+ // avoid putting undesired load on the API server
+ arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
} else {
arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
}
+ // the by_uuid mount point is used by singularity when writing
+ // out docker images converted to SIF
arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
os.RemoveAll(cr.ArvMountPoint)
cr.CleanupDirs()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
- "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
os.RemoveAll(cr.ArvMountPoint)
cr.CleanupDirs()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
os.RemoveAll(cr.ArvMountPoint)
cr.CleanupDirs()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
os.RemoveAll(cr.ArvMountPoint)
cr.CleanupDirs()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{
"/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
"/keepout": {realTemp + "/keep1/tmp0", false},
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{
"/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
"/keepout": {realTemp + "/keep1/tmp0", false},
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
- "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+ "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{
"/tmp": {realTemp + "/tmp2", false},
"/tmp/foo": {realTemp + "/keep1/tmp0", true},
var Command cmd.Handler = &installCommand{}
const devtestDatabasePassword = "insecure_arvados_test"
+const goversion = "1.17.1"
type installCommand struct {
ClusterType string
}
if !prod {
- goversion := "1.17.1"
if havegoversion, err := exec.Command("/usr/local/bin/go", "version").CombinedOutput(); err == nil && bytes.HasPrefix(havegoversion, []byte("go version go"+goversion+" ")) {
logger.Print("go " + goversion + " already installed")
} else {
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package install
+
+import (
+ "bytes"
+ "os/exec"
+ "testing"
+
+ "gopkg.in/check.v1"
+)
+
+// Test hooks gocheck into the standard "go test" runner.
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+// Suite holds no state; it exists to register the tests below.
+type Suite struct{}
+
+/*
+	TestExtractGoVersion tests the grep/awk command used in
+	tools/arvbox/bin/arvbox to extract the version of Go to install for
+	bootstrapping `arvados-server`.
+
+	If this test is changed, the arvbox code will also need to be updated.
+*/
+func (*Suite) TestExtractGoVersion(c *check.C) {
+	// The script mirrors the arvbox pipeline: grep the "const
+	// goversion =" line out of lib/install/deps.go and print the
+	// double-quoted value (awk field 2 with '"' as separator).
+	script := `
+	sourcepath="$(realpath ../..)"
+	(cd ${sourcepath} && grep 'const goversion =' lib/install/deps.go |awk -F'"' '{print $2}')
+	`
+	cmd := exec.Command("bash", "-")
+	cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
+	cmdOutput, err := cmd.Output()
+	c.Assert(err, check.IsNil)
+	// Output must match the goversion constant exactly (plus the
+	// trailing newline printed by awk).
+	c.Assert(string(cmdOutput), check.Equals, goversion+"\n")
+}
var (
EndpointConfigGet = APIEndpoint{"GET", "arvados/v1/config", ""}
+ EndpointVocabularyGet = APIEndpoint{"GET", "arvados/v1/vocabulary", ""}
EndpointLogin = APIEndpoint{"GET", "login", ""}
EndpointLogout = APIEndpoint{"GET", "logout", ""}
EndpointCollectionCreate = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
EndpointGroupDelete = APIEndpoint{"DELETE", "arvados/v1/groups/{uuid}", ""}
EndpointGroupTrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/trash", ""}
EndpointGroupUntrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/untrash", ""}
+ EndpointLinkCreate = APIEndpoint{"POST", "arvados/v1/links", "link"}
+ EndpointLinkUpdate = APIEndpoint{"PATCH", "arvados/v1/links/{uuid}", "link"}
+ EndpointLinkGet = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
+ EndpointLinkList = APIEndpoint{"GET", "arvados/v1/links", ""}
+ EndpointLinkDelete = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
EndpointUserActivate = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
EndpointUserCreate = APIEndpoint{"POST", "arvados/v1/users", "user"}
EndpointUserCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
type API interface {
ConfigGet(ctx context.Context) (json.RawMessage, error)
+ VocabularyGet(ctx context.Context) (Vocabulary, error)
Login(ctx context.Context, options LoginOptions) (LoginResponse, error)
Logout(ctx context.Context, options LogoutOptions) (LogoutResponse, error)
CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
GroupDelete(ctx context.Context, options DeleteOptions) (Group, error)
GroupTrash(ctx context.Context, options DeleteOptions) (Group, error)
GroupUntrash(ctx context.Context, options UntrashOptions) (Group, error)
+ LinkCreate(ctx context.Context, options CreateOptions) (Link, error)
+ LinkUpdate(ctx context.Context, options UpdateOptions) (Link, error)
+ LinkGet(ctx context.Context, options GetOptions) (Link, error)
+ LinkList(ctx context.Context, options ListOptions) (LinkList, error)
+ LinkDelete(ctx context.Context, options DeleteOptions) (Link, error)
SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
Admin UploadDownloadPermission
}
+// ManagedProperties maps property keys to the rules used to maintain
+// them automatically (used by Collections.ManagedProperties in the
+// cluster config).
+type ManagedProperties map[string]struct {
+	Value     interface{} // fixed value to assign, if any
+	Function  string      // name of a function that computes the value
+	Protected bool        // whether users are prevented from overriding it
+}
+
type Cluster struct {
ClusterID string `json:"-"`
ManagementToken string
WebsocketClientEventQueue int
WebsocketServerEventQueue int
KeepServiceRequestTimeout Duration
+ VocabularyPath string
}
AuditLogs struct {
MaxAge Duration
UnloggedAttributes StringSet
}
Collections struct {
- BlobSigning bool
- BlobSigningKey string
- BlobSigningTTL Duration
- BlobTrash bool
- BlobTrashLifetime Duration
- BlobTrashCheckInterval Duration
- BlobTrashConcurrency int
- BlobDeleteConcurrency int
- BlobReplicateConcurrency int
- CollectionVersioning bool
- DefaultTrashLifetime Duration
- DefaultReplication int
- ManagedProperties map[string]struct {
- Value interface{}
- Function string
- Protected bool
- }
+ BlobSigning bool
+ BlobSigningKey string
+ BlobSigningTTL Duration
+ BlobTrash bool
+ BlobTrashLifetime Duration
+ BlobTrashCheckInterval Duration
+ BlobTrashConcurrency int
+ BlobDeleteConcurrency int
+ BlobReplicateConcurrency int
+ CollectionVersioning bool
+ DefaultTrashLifetime Duration
+ DefaultReplication int
+ ManagedProperties ManagedProperties
PreserveVersionIfIdle Duration
TrashSweepInterval Duration
TrustAllContent bool
Insecure bool
}
Users struct {
+ ActivatedUsersAreVisibleToOthers bool
AnonymousUserToken string
AdminNotifierEmailFrom string
AutoAdminFirstUser bool
Options map[string]struct{}
}
UserProfileFormMessage string
- VocabularyURL string
WelcomePageHTML string
InactivePageHTML string
SSHHelpPageHTML string
RuntimeUserUUID string `json:"runtime_user_uuid"`
RuntimeAuthScopes []string `json:"runtime_auth_scopes"`
RuntimeToken string `json:"runtime_token"`
+ AuthUUID string `json:"auth_uuid"`
}
// ContainerRequest is an arvados#container_request resource.
package arvados
+import "time"
+
// Link is an arvados#link record
type Link struct {
-	UUID       string                 `json:"uuid,omiempty"`
-	OwnerUUID  string                 `json:"owner_uuid"`
-	Name       string                 `json:"name"`
-	LinkClass  string                 `json:"link_class"`
-	HeadUUID   string                 `json:"head_uuid"`
-	HeadKind   string                 `json:"head_kind"`
-	TailUUID   string                 `json:"tail_uuid"`
-	TailKind   string                 `json:"tail_kind"`
-	Properties map[string]interface{} `json:"properties"`
+	// "omitempty" fixes the earlier "omiempty" typo, so an empty
+	// UUID is now actually omitted when marshalling.
+	UUID                 string                 `json:"uuid,omitempty"`
+	Etag                 string                 `json:"etag"`
+	Href                 string                 `json:"href"`
+	OwnerUUID            string                 `json:"owner_uuid"`
+	Name                 string                 `json:"name"`
+	LinkClass            string                 `json:"link_class"`
+	CreatedAt            time.Time              `json:"created_at"`
+	ModifiedAt           time.Time              `json:"modified_at"`
+	ModifiedByClientUUID string                 `json:"modified_by_client_uuid"`
+	ModifiedByUserUUID   string                 `json:"modified_by_user_uuid"`
+	HeadUUID             string                 `json:"head_uuid"`
+	HeadKind             string                 `json:"head_kind"`
+	TailUUID             string                 `json:"tail_uuid"`
+	TailKind             string                 `json:"tail_kind"`
+	Properties           map[string]interface{} `json:"properties"`
}
// LinkList is an arvados#linkList resource.
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Vocabulary is the in-memory form of a metadata vocabulary
+// definition: a strictness flag plus a map of tag keys.
+type Vocabulary struct {
+	// reservedTagKeys is populated by NewVocabulary; reserved keys
+	// bypass validation even in strict mode. (The json:"-" tag is
+	// redundant on an unexported field but harmless.)
+	reservedTagKeys map[string]bool          `json:"-"`
+	StrictTags      bool                     `json:"strict_tags"`
+	Tags            map[string]VocabularyTag `json:"tags"`
+}
+
+// VocabularyTag describes one tag key: whether its values are
+// restricted to the listed ones (Strict), its display labels, and its
+// allowed values.
+type VocabularyTag struct {
+	Strict bool                          `json:"strict"`
+	Labels []VocabularyLabel             `json:"labels"`
+	Values map[string]VocabularyTagValue `json:"values"`
+}
+
+// Cannot have a constant map in Go, so we have to use a function.
+// systemTagKeys returns the property keys that the system itself uses;
+// these are always treated as reserved by NewVocabulary.
+func (v *Vocabulary) systemTagKeys() map[string]bool {
+	return map[string]bool{
+		"type":                  true,
+		"template_uuid":         true,
+		"groups":                true,
+		"username":              true,
+		"image_timestamp":       true,
+		"docker-image-repo-tag": true,
+		"filters":               true,
+		"container_request":     true,
+	}
+}
+
+// VocabularyLabel is a human-readable alias for a tag key or value.
+type VocabularyLabel struct {
+	Label string `json:"label"`
+}
+
+// VocabularyTagValue describes one allowed value of a tag key,
+// carrying its display labels.
+type VocabularyTagValue struct {
+	Labels []VocabularyLabel `json:"labels"`
+}
+
+// NewVocabulary creates a new Vocabulary from a JSON definition and a list
+// of reserved tag keys that will get special treatment when strict mode is
+// enabled. Empty data yields an empty (permissive) vocabulary; malformed
+// or non-matching JSON yields an error.
+func NewVocabulary(data []byte, managedTagKeys []string) (voc *Vocabulary, err error) {
+	// bytes.Equal replaces the previous bytes.Compare(...) == 0,
+	// which go vet flags as a simplifiable comparison; behavior is
+	// identical.
+	if bytes.Equal(data, []byte("")) {
+		return &Vocabulary{}, nil
+	}
+	err = json.Unmarshal(data, &voc)
+	if err != nil {
+		return nil, fmt.Errorf("invalid JSON format error: %q", err)
+	}
+	if reflect.DeepEqual(voc, &Vocabulary{}) {
+		return nil, fmt.Errorf("JSON data provided doesn't match Vocabulary format: %q", data)
+	}
+	// Reserved keys = caller-managed keys + system keys; both sets
+	// are exempt from strict checking in Check().
+	voc.reservedTagKeys = make(map[string]bool)
+	for _, managedKey := range managedTagKeys {
+		voc.reservedTagKeys[managedKey] = true
+	}
+	for systemKey := range voc.systemTagKeys() {
+		voc.reservedTagKeys[systemKey] = true
+	}
+	err = voc.validate()
+	if err != nil {
+		return nil, err
+	}
+	return voc, nil
+}
+
+// validate checks the vocabulary definition for internal consistency:
+// strict vocabularies must define tags, tag keys must not collide with
+// reserved keys or (case-insensitively) with each other or with any
+// key label, and the same rules apply to each key's values and value
+// labels. Returns the first problem found, or nil.
+func (v *Vocabulary) validate() error {
+	if v == nil {
+		return nil
+	}
+	// tagKeys maps lowercased keys/labels to their original
+	// spelling, to detect case-insensitive collisions.
+	tagKeys := map[string]string{}
+	// Checks for Vocabulary strictness
+	if v.StrictTags && len(v.Tags) == 0 {
+		return fmt.Errorf("vocabulary is strict but no tags are defined")
+	}
+	// Checks for collisions between tag keys, reserved tag keys
+	// and tag key labels.
+	for key := range v.Tags {
+		if v.reservedTagKeys[key] {
+			return fmt.Errorf("tag key %q is reserved", key)
+		}
+		lcKey := strings.ToLower(key)
+		if tagKeys[lcKey] != "" {
+			return fmt.Errorf("duplicate tag key %q", key)
+		}
+		tagKeys[lcKey] = key
+		for _, lbl := range v.Tags[key].Labels {
+			label := strings.ToLower(lbl.Label)
+			if tagKeys[label] != "" {
+				return fmt.Errorf("tag label %q for key %q already seen as a tag key or label", lbl.Label, key)
+			}
+			tagKeys[label] = lbl.Label
+		}
+		// Checks for value strictness
+		if v.Tags[key].Strict && len(v.Tags[key].Values) == 0 {
+			return fmt.Errorf("tag key %q is configured as strict but doesn't provide values", key)
+		}
+		// Checks for collisions between tag values and tag value labels.
+		tagValues := map[string]string{}
+		for val := range v.Tags[key].Values {
+			lcVal := strings.ToLower(val)
+			if tagValues[lcVal] != "" {
+				return fmt.Errorf("duplicate tag value %q for tag %q", val, key)
+			}
+			// Checks for collisions between labels from different values.
+			tagValues[lcVal] = val
+			for _, tagLbl := range v.Tags[key].Values[val].Labels {
+				label := strings.ToLower(tagLbl.Label)
+				// A label may repeat within the same value, but not
+				// point at a different value.
+				if tagValues[label] != "" && tagValues[label] != val {
+					return fmt.Errorf("tag value label %q for pair (%q:%q) already seen on value %q", tagLbl.Label, key, val, tagValues[label])
+				}
+				tagValues[label] = val
+			}
+		}
+	}
+	return nil
+}
+
+// getLabelsToKeys builds a map from each lowercased key label to its
+// canonical tag key, used by Check to report alias usage.
+func (v *Vocabulary) getLabelsToKeys() (labels map[string]string) {
+	if v == nil {
+		return
+	}
+	labels = make(map[string]string)
+	for key, val := range v.Tags {
+		for _, lbl := range val.Labels {
+			label := strings.ToLower(lbl.Label)
+			labels[label] = key
+		}
+	}
+	return labels
+}
+
+// getLabelsToValues builds, for one tag key, a map from each
+// lowercased value and value label to the canonical value spelling,
+// used by checkValue to report alias usage.
+func (v *Vocabulary) getLabelsToValues(key string) (labels map[string]string) {
+	if v == nil {
+		return
+	}
+	labels = make(map[string]string)
+	if _, ok := v.Tags[key]; ok {
+		for val := range v.Tags[key].Values {
+			// The lowercased canonical value itself also maps back,
+			// so "idval1" is reported as an alias of "IDVAL1".
+			labels[strings.ToLower(val)] = val
+			for _, tagLbl := range v.Tags[key].Values[val].Labels {
+				label := strings.ToLower(tagLbl.Label)
+				labels[label] = val
+			}
+		}
+	}
+	return labels
+}
+
+// checkValue validates one value for an already-recognized tag key:
+// canonical values pass; aliases (labels or wrong case) are rejected
+// with a hint; unknown values are rejected only when the key is
+// strict.
+func (v *Vocabulary) checkValue(key, val string) error {
+	if _, ok := v.Tags[key].Values[val]; !ok {
+		lcVal := strings.ToLower(val)
+		correctValue, ok := v.getLabelsToValues(key)[lcVal]
+		if ok {
+			return fmt.Errorf("tag value %q for key %q is an alias, must be provided as %q", val, key, correctValue)
+		} else if v.Tags[key].Strict {
+			return fmt.Errorf("tag value %q is not valid for key %q", val, key)
+		}
+	}
+	return nil
+}
+
+// Check validates the given data against the vocabulary. Reserved keys
+// are always allowed; unknown keys are rejected only in strict mode
+// (with an alias hint when the key matches a label); values may be a
+// single string or a list of strings, each checked via checkValue.
+func (v *Vocabulary) Check(data map[string]interface{}) error {
+	if v == nil {
+		return nil
+	}
+	for key, val := range data {
+		// Checks for key validity
+		if v.reservedTagKeys[key] {
+			// Allow reserved keys to be used even if they are not defined in
+			// the vocabulary no matter its strictness.
+			continue
+		}
+		if _, ok := v.Tags[key]; !ok {
+			lcKey := strings.ToLower(key)
+			correctKey, ok := v.getLabelsToKeys()[lcKey]
+			if ok {
+				return fmt.Errorf("tag key %q is an alias, must be provided as %q", key, correctKey)
+			} else if v.StrictTags {
+				return fmt.Errorf("tag key %q is not defined in the vocabulary", key)
+			}
+			// If the key is not defined, we don't need to check the value
+			continue
+		}
+		// Checks for value validity -- key is defined
+		switch val := val.(type) {
+		case string:
+			err := v.checkValue(key, val)
+			if err != nil {
+				return err
+			}
+		case []interface{}:
+			for _, singleVal := range val {
+				switch singleVal := singleVal.(type) {
+				case string:
+					err := v.checkValue(key, singleVal)
+					if err != nil {
+						return err
+					}
+				default:
+					return fmt.Errorf("value list element type for tag key %q was %T, but expected a string", key, singleVal)
+				}
+			}
+		default:
+			return fmt.Errorf("value type for tag key %q was %T, but expected a string or list of strings", key, val)
+		}
+	}
+	return nil
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "encoding/json"
+
+ check "gopkg.in/check.v1"
+)
+
+// VocabularySuite tests Vocabulary construction and property checking
+// against a fixed vocabulary built in SetUpTest.
+type VocabularySuite struct {
+	testVoc *Vocabulary // shared fixture, rebuilt before every test
+}
+
+var _ = check.Suite(&VocabularySuite{})
+
+// SetUpTest builds the shared fixture vocabulary: one reserved key,
+// a non-strict key with values (IDTAGANIMALS), a strict key with
+// values (IDTAGIMPORTANCE), and a non-strict key without values
+// (IDTAGCOMMENT). It asserts the fixture itself validates.
+func (s *VocabularySuite) SetUpTest(c *check.C) {
+	s.testVoc = &Vocabulary{
+		reservedTagKeys: map[string]bool{
+			"reservedKey": true,
+		},
+		StrictTags: false,
+		Tags: map[string]VocabularyTag{
+			"IDTAGANIMALS": {
+				Strict: false,
+				Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+				Values: map[string]VocabularyTagValue{
+					"IDVALANIMAL1": {
+						Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+					},
+					"IDVALANIMAL2": {
+						Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+					},
+				},
+			},
+			"IDTAGIMPORTANCE": {
+				Strict: true,
+				Labels: []VocabularyLabel{{Label: "Importance"}, {Label: "Priority"}},
+				Values: map[string]VocabularyTagValue{
+					"IDVAL3": {
+						Labels: []VocabularyLabel{{Label: "Low"}, {Label: "Low priority"}},
+					},
+					"IDVAL2": {
+						Labels: []VocabularyLabel{{Label: "Medium"}, {Label: "Medium priority"}},
+					},
+					"IDVAL1": {
+						Labels: []VocabularyLabel{{Label: "High"}, {Label: "High priority"}},
+					},
+				},
+			},
+			"IDTAGCOMMENT": {
+				Strict: false,
+				Labels: []VocabularyLabel{{Label: "Comment"}},
+			},
+		},
+	}
+	err := s.testVoc.validate()
+	c.Assert(err, check.IsNil)
+}
+
+// TestCheck table-tests Vocabulary.Check over JSON property sets,
+// toggling StrictTags per case and matching error messages by regexp.
+func (s *VocabularySuite) TestCheck(c *check.C) {
+	tests := []struct {
+		name          string
+		strictVoc     bool
+		props         string
+		expectSuccess bool
+		errMatches    string
+	}{
+		// Check succeeds
+		{
+			"Known key, known value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL1"}`,
+			true,
+			"",
+		},
+		{
+			"Unknown non-alias key on non-strict vocabulary",
+			false,
+			`{"foo":"bar"}`,
+			true,
+			"",
+		},
+		{
+			"Known non-strict key, unknown non-alias value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL3"}`,
+			true,
+			"",
+		},
+		{
+			"Undefined but reserved key on strict vocabulary",
+			true,
+			`{"reservedKey":"bar"}`,
+			true,
+			"",
+		},
+		{
+			"Known key, list of known values",
+			false,
+			`{"IDTAGANIMALS":["IDVALANIMAL1","IDVALANIMAL2"]}`,
+			true,
+			"",
+		},
+		{
+			"Known non-strict key, list of unknown non-alias values",
+			false,
+			`{"IDTAGCOMMENT":["hello world","lorem ipsum"]}`,
+			true,
+			"",
+		},
+		// Check fails
+		{
+			"Known first key & value; known 2nd key, unknown 2nd value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL1", "IDTAGIMPORTANCE": "blah blah"}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Unknown non-alias key on strict vocabulary",
+			true,
+			`{"foo":"bar"}`,
+			false,
+			"tag key.*is not defined in the vocabulary",
+		},
+		{
+			"Known non-strict key, known value alias",
+			false,
+			`{"IDTAGANIMALS":"Loxodonta"}`,
+			false,
+			"tag value.*for key.* is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, unknown non-alias value",
+			false,
+			`{"IDTAGIMPORTANCE":"Unimportant"}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Known strict key, lowercase value regarded as alias",
+			false,
+			`{"IDTAGIMPORTANCE":"idval1"}`,
+			false,
+			"tag value.*for key.* is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, known value alias",
+			false,
+			`{"IDTAGIMPORTANCE":"High"}`,
+			false,
+			"tag value.* for key.*is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, list of known alias values",
+			false,
+			`{"IDTAGIMPORTANCE":["High", "Low"]}`,
+			false,
+			"tag value.*for key.*is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, list of unknown non-alias values",
+			false,
+			`{"IDTAGIMPORTANCE":["foo","bar"]}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Invalid value type",
+			false,
+			`{"IDTAGANIMALS":1}`,
+			false,
+			"value type for tag key.* was.*, but expected a string or list of strings",
+		},
+		{
+			"Value list of invalid type",
+			false,
+			`{"IDTAGANIMALS":[1]}`,
+			false,
+			"value list element type for tag key.* was.*, but expected a string",
+		},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		// Mutate the shared fixture's strictness for this case;
+		// SetUpTest rebuilds it before the next test method.
+		s.testVoc.StrictTags = tt.strictVoc
+
+		var data map[string]interface{}
+		err := json.Unmarshal([]byte(tt.props), &data)
+		c.Assert(err, check.IsNil)
+		err = s.testVoc.Check(data)
+		if tt.expectSuccess {
+			c.Assert(err, check.IsNil)
+		} else {
+			c.Assert(err, check.NotNil)
+			c.Assert(err.Error(), check.Matches, tt.errMatches)
+		}
+	}
+}
+
+func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
+ tests := []struct {
+ name string
+ data string
+ isValid bool
+ errMatches string
+ expect *Vocabulary
+ }{
+ {"Empty data", "", true, "", &Vocabulary{}},
+ {"Invalid JSON", "foo", false, "invalid JSON format.*", nil},
+ {"Valid, empty JSON", "{}", false, ".*doesn't match Vocabulary format.*", nil},
+ {"Valid JSON, wrong data", `{"foo":"bar"}`, false, ".*doesn't match Vocabulary format.*", nil},
+ {
+ "Simple valid example",
+ `{"tags":{
+ "IDTAGANIMALS":{
+ "strict": false,
+ "labels": [{"label": "Animal"}, {"label": "Creature"}],
+ "values": {
+ "IDVALANIMAL1":{"labels":[{"label":"Human"}, {"label":"Homo sapiens"}]},
+ "IDVALANIMAL2":{"labels":[{"label":"Elephant"}, {"label":"Loxodonta"}]},
+ "DOG":{"labels":[{"label":"Dog"}, {"label":"Canis lupus familiaris"}, {"label":"dOg"}]}
+ }
+ }
+ }}`,
+ true, "",
+ &Vocabulary{
+ reservedTagKeys: map[string]bool{
+ "type": true,
+ "template_uuid": true,
+ "groups": true,
+ "username": true,
+ "image_timestamp": true,
+ "docker-image-repo-tag": true,
+ "filters": true,
+ "container_request": true,
+ },
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ Values: map[string]VocabularyTagValue{
+ "IDVALANIMAL1": {
+ Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+ },
+ "IDVALANIMAL2": {
+ Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+ },
+ "DOG": {
+ Labels: []VocabularyLabel{{Label: "Dog"}, {Label: "Canis lupus familiaris"}, {Label: "dOg"}},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ "Valid data, but uses reserved key",
+ `{"tags":{
+ "type":{
+ "strict": false,
+ "labels": [{"label": "Type"}]
+ }
+ }}`,
+ false, "tag key.*is reserved", nil,
+ },
+ }
+
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+ voc, err := NewVocabulary([]byte(tt.data), []string{})
+ if tt.isValid {
+ c.Assert(err, check.IsNil)
+ } else {
+ c.Assert(err, check.NotNil)
+ if tt.errMatches != "" {
+ c.Assert(err, check.ErrorMatches, tt.errMatches)
+ }
+ }
+ c.Assert(voc, check.DeepEquals, tt.expect)
+ }
+}
+
+func (s *VocabularySuite) TestValidationErrors(c *check.C) {
+ tests := []struct {
+ name string
+ voc *Vocabulary
+ errMatches string
+ }{
+ {
+ "Strict vocabulary, no keys",
+ &Vocabulary{
+ StrictTags: true,
+ },
+ "vocabulary is strict but no tags are defined",
+ },
+ {
+ "Collision between tag key and tag key label",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ },
+ "IDTAGCOMMENT": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IDTAGANIMALS"}},
+ },
+ },
+ },
+ "", // Depending on how the map is sorted, this could be one of two errors
+ },
+ {
+ "Collision between tag key and tag key label (case-insensitive)",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ },
+ "IDTAGCOMMENT": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IdTagAnimals"}},
+ },
+ },
+ },
+ "", // Depending on how the map is sorted, this could be one of two errors
+ },
+ {
+ "Collision between tag key labels",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ },
+ "IDTAGCOMMENT": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "Animal"}},
+ },
+ },
+ },
+ "tag label.*for key.*already seen.*",
+ },
+ {
+ "Collision between tag value and tag value label",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ Values: map[string]VocabularyTagValue{
+ "IDVALANIMAL1": {
+ Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+ },
+ "IDVALANIMAL2": {
+ Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDVALANIMAL1"}},
+ },
+ },
+ },
+ },
+ },
+ "", // Depending on how the map is sorted, this could be one of two errors
+ },
+ {
+ "Collision between tag value and tag value label (case-insensitive)",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ Values: map[string]VocabularyTagValue{
+ "IDVALANIMAL1": {
+ Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+ },
+ "IDVALANIMAL2": {
+ Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDValAnimal1"}},
+ },
+ },
+ },
+ },
+ },
+ "", // Depending on how the map is sorted, this could be one of two errors
+ },
+ {
+ "Collision between tag value labels",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ Values: map[string]VocabularyTagValue{
+ "IDVALANIMAL1": {
+ Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+ },
+ "IDVALANIMAL2": {
+ Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Mammal"}},
+ },
+ },
+ },
+ },
+ },
+ "tag value label.*for pair.*already seen.*on value.*",
+ },
+ {
+ "Collision between tag value labels (case-insensitive)",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: false,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ Values: map[string]VocabularyTagValue{
+ "IDVALANIMAL1": {
+ Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+ },
+ "IDVALANIMAL2": {
+ Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "mAMMAL"}},
+ },
+ },
+ },
+ },
+ },
+ "tag value label.*for pair.*already seen.*on value.*",
+ },
+ {
+ "Strict tag key, with no values",
+ &Vocabulary{
+ StrictTags: false,
+ Tags: map[string]VocabularyTag{
+ "IDTAGANIMALS": {
+ Strict: true,
+ Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+ },
+ },
+ },
+ "tag key.*is configured as strict but doesn't provide values",
+ },
+ }
+ for _, tt := range tests {
+ c.Log(c.TestName()+" ", tt.name)
+ err := tt.voc.validate()
+ c.Assert(err, check.NotNil)
+ if tt.errMatches != "" {
+ c.Assert(err, check.ErrorMatches, tt.errMatches)
+ }
+ }
+}
as.appendCall(ctx, as.ConfigGet, nil)
return nil, as.Error
}
+func (as *APIStub) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+ as.appendCall(ctx, as.VocabularyGet, nil)
+ return arvados.Vocabulary{}, as.Error
+}
func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
as.appendCall(ctx, as.Login, options)
return arvados.LoginResponse{}, as.Error
as.appendCall(ctx, as.GroupUntrash, options)
return arvados.Group{}, as.Error
}
+func (as *APIStub) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+ as.appendCall(ctx, as.LinkCreate, options)
+ return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+ as.appendCall(ctx, as.LinkUpdate, options)
+ return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+ as.appendCall(ctx, as.LinkGet, options)
+ return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+ as.appendCall(ctx, as.LinkList, options)
+ return arvados.LinkList{}, as.Error
+}
+func (as *APIStub) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+ as.appendCall(ctx, as.LinkDelete, options)
+ return arvados.Link{}, as.Error
+}
func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
as.appendCall(ctx, as.SpecimenCreate, options)
return arvados.Specimen{}, as.Error
storage_classes=None,
trash_at=None,
merge=True,
- num_retries=None):
+ num_retries=None,
+ preserve_version=False):
"""Save collection to an existing collection record.
Commit pending buffer blocks to Keep, merge with remote record (if
:num_retries:
Retry count on API calls (if None, use the collection default)
+ :preserve_version:
+ If True, indicate that the collection content being saved right now
+ should be preserved in a version snapshot if the collection record is
+ updated in the future. Requires that the API server has
+ Collections.CollectionVersioning enabled, if not, setting this will
+ raise an exception.
+
"""
if properties and type(properties) is not dict:
raise errors.ArgumentError("properties must be dictionary type.")
if trash_at and type(trash_at) is not datetime.datetime:
raise errors.ArgumentError("trash_at must be datetime type.")
+ if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+ raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
body={}
if properties:
body["properties"] = properties
if trash_at:
t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
body["trash_at"] = t
+ if preserve_version:
+ body["preserve_version"] = preserve_version
if not self.committed():
if self._has_remote_blocks:
storage_classes=None,
trash_at=None,
ensure_unique_name=False,
- num_retries=None):
+ num_retries=None,
+ preserve_version=False):
"""Save collection to a new collection record.
Commit pending buffer blocks to Keep and, when create_collection_record
:num_retries:
Retry count on API calls (if None, use the collection default)
+ :preserve_version:
+ If True, indicate that the collection content being saved right now
+ should be preserved in a version snapshot if the collection record is
+ updated in the future. Requires that the API server has
+ Collections.CollectionVersioning enabled, if not, setting this will
+ raise an exception.
+
"""
if properties and type(properties) is not dict:
raise errors.ArgumentError("properties must be dictionary type.")
if trash_at and type(trash_at) is not datetime.datetime:
raise errors.ArgumentError("trash_at must be datetime type.")
+ if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+ raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
if self._has_remote_blocks:
# Copy any remote blocks to the local cluster.
self._copy_remote_blocks(remote_blocks={})
if trash_at:
t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
body["trash_at"] = t
+ if preserve_version:
+ body["preserve_version"] = preserve_version
self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
text = self._api_response["manifest_text"]
"UserProfileNotificationAddress": "arvados@example.com",
},
"Collections": {
+ "CollectionVersioning": True,
"BlobSigningKey": "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc",
"TrustAllContent": False,
"ForwardSlashNameSubstitution": "/",
class NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):
+ def test_preserve_version_on_save(self):
+ c = Collection()
+ c.save_new(preserve_version=True)
+ coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+ self.assertEqual(coll_record['version'], 1)
+ self.assertEqual(coll_record['preserve_version'], True)
+ with c.open("foo.txt", "wb") as foo:
+ foo.write(b"foo")
+ c.save(preserve_version=True)
+ coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+ self.assertEqual(coll_record['version'], 2)
+ self.assertEqual(coll_record['preserve_version'], True)
+ with c.open("bar.txt", "wb") as foo:
+ foo.write(b"bar")
+ c.save(preserve_version=False)
+ coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+ self.assertEqual(coll_record['version'], 3)
+ self.assertEqual(coll_record['preserve_version'], False)
+
def test_get_manifest_text_only_committed(self):
c = Collection()
with c.open("count.txt", "wb") as f:
metaclass (0.0.4)
method_source (1.0.0)
mini_mime (1.1.0)
- mini_portile2 (2.5.3)
+ mini_portile2 (2.6.1)
minitest (5.10.3)
mocha (1.8.0)
metaclass (~> 0.0.1)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.5.7)
- nokogiri (1.11.7)
- mini_portile2 (~> 2.5.0)
+ nokogiri (1.12.5)
+ mini_portile2 (~> 2.6.1)
racc (~> 1.4)
oj (3.9.2)
optimist (3.0.0)
pg (1.1.4)
power_assert (1.1.4)
public_suffix (4.0.6)
- racc (1.5.2)
+ racc (1.6.0)
rack (2.2.3)
rack-test (1.1.0)
rack (>= 1.0, < 3)
name: 'can_read').empty?
# Add can_read link from this user to "all users" which makes this
- # user "invited"
- group_perm = create_user_group_link
+ # user "invited", and (depending on config) a link in the opposite
+ # direction which makes this user visible to other users.
+ group_perms = add_to_all_users_group
# Add git repo
repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
forget_cached_group_perms
- return [repo_perm, vm_login_perm, group_perm, self].compact
+ return [repo_perm, vm_login_perm, *group_perms, self].compact
end
# delete user signatures, login, repo, and vm perms, and mark as inactive
login_perm
end
- # add the user to the 'All users' group
- def create_user_group_link
- return (Link.where(tail_uuid: self.uuid,
+ def add_to_all_users_group
+ resp = [Link.where(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
link_class: 'permission',
- name: 'can_read').first or
+ name: 'can_read').first ||
Link.create(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
link_class: 'permission',
- name: 'can_read'))
+ name: 'can_read')]
+ if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
+ resp += [Link.where(tail_uuid: all_users_group_uuid,
+ head_uuid: self.uuid,
+ link_class: 'permission',
+ name: 'can_read').first ||
+ Link.create(tail_uuid: all_users_group_uuid,
+ head_uuid: self.uuid,
+ link_class: 'permission',
+ name: 'can_read')]
+ end
+ return resp
end
# Give the special "System group" permission to manage this user and
@initial_link_count = Link.count
@vm_uuid = virtual_machines(:testvm).uuid
ActionMailer::Base.deliveries = []
+ Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
end
test "activate a user after signing UA" do
end
test "manager user gets permission to minions' articles via can_manage link" do
+ Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
manager = create :active_user, first_name: "Manage", last_name: "Er"
minion = create :active_user, first_name: "Min", last_name: "Ion"
minions_specimen = act_as_user minion do
end
test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+ Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
a = create :active_user, first_name: "A"
b = create :active_user, first_name: "B"
other = create :active_user, first_name: "OTHER"
assert_not_allowed { User.new.save }
end
- test "setup new user" do
- set_user_from_auth :admin
+ [true, false].each do |visible|
+ test "setup new user with ActivatedUsersAreVisibleToOthers=#{visible}" do
+ Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = visible
+ set_user_from_auth :admin
- email = 'foo@example.com'
+ email = 'foo@example.com'
- user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+ user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
- vm = VirtualMachine.create
+ vm = VirtualMachine.create
- response = user.setup(repo_name: 'foo/testrepo',
- vm_uuid: vm.uuid)
+ response = user.setup(repo_name: 'foo/testrepo',
+ vm_uuid: vm.uuid)
- resp_user = find_obj_in_resp response, 'User'
- verify_user resp_user, email
+ resp_user = find_obj_in_resp response, 'User'
+ verify_user resp_user, email
- group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+ verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
- repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
- verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+ group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+ if visible
+ verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+ else
+ assert_nil group_perm2
+ end
- vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
- verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
- assert_equal("foo", vm_perm.properties["username"])
+ repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+ verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+ vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+ verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+ assert_equal("foo", vm_perm.properties["username"])
+ end
end
test "setup new user with junk in database" do
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+ verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+
# invoke setup again with repo_name
response = user.setup(repo_name: 'foo/testrepo')
resp_user = find_obj_in_resp response, 'User', nil
break
end
else # looking for a link
- if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+ if ArvadosModel::resource_class_for_uuid(x['head_uuid']).andand.kind == head_kind
return_obj = x
break
end
usr = self.api.users().current().execute(num_retries=self.args.retries)
now = time.time()
dir_class = None
- dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries]
+ dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
mount_readme = False
storage_classes = None
return
e = self.operations.inodes.add_entry(Directory(
- llfuse.ROOT_INODE, self.operations.inodes, self.api.config))
+ llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
dir_args[0] = e.inode
for name in self.args.mount_by_id:
and the value referencing a File or Directory object.
"""
- def __init__(self, parent_inode, inodes, apiconfig):
+ def __init__(self, parent_inode, inodes, apiconfig, enable_write):
"""parent_inode is the integer inode number"""
super(Directory, self).__init__()
self.apiconfig = apiconfig
self._entries = {}
self._mtime = time.time()
+ self._enable_write = enable_write
def forward_slash_subst(self):
if not hasattr(self, '_fsns'):
"""
- def __init__(self, parent_inode, inodes, apiconfig, collection):
- super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig)
+ def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection):
+ super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
self.apiconfig = apiconfig
self.collection = collection
item.fuse_entry.dead = False
self._entries[name] = item.fuse_entry
elif isinstance(item, arvados.collection.RichCollectionBase):
- self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, item))
+ self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item))
self._entries[name].populate(mtime)
else:
- self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+ self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
item.fuse_entry = self._entries[name]
def on_event(self, event, collection, name, item):
self.new_entry(entry, item, self.mtime())
def writable(self):
- return self.collection.writable()
+ return self._enable_write and self.collection.writable()
@use_counter
def flush(self):
+ if not self.writable():
+ return
with llfuse.lock_released:
self.collection.root_collection().save()
@use_counter
@check_update
def create(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
with llfuse.lock_released:
self.collection.open(name, "w").close()
@use_counter
@check_update
def mkdir(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
with llfuse.lock_released:
self.collection.mkdirs(name)
@use_counter
@check_update
def unlink(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
with llfuse.lock_released:
self.collection.remove(name)
self.flush()
@use_counter
@check_update
def rmdir(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
with llfuse.lock_released:
self.collection.remove(name)
self.flush()
@use_counter
@check_update
def rename(self, name_old, name_new, src):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
+
if not isinstance(src, CollectionDirectoryBase):
raise llfuse.FUSEError(errno.EPERM)
class CollectionDirectory(CollectionDirectoryBase):
"""Represents the root of a directory tree representing a collection."""
- def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
- super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, None)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
+ super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None)
self.api = api
self.num_retries = num_retries
self.collection_record_file = None
self._mtime = 0
self._manifest_size = 0
if self.collection_locator:
- self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+ self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
self._updating_lock = threading.Lock()
def same(self, i):
return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
def writable(self):
- return self.collection.writable() if self.collection is not None else self._writable
+ return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
def want_event_subscribe(self):
return (uuid_pattern.match(self.collection_locator) is not None)
def save_new(self):
pass
- def __init__(self, parent_inode, inodes, api_client, num_retries, storage_classes=None):
+ def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
collection = self.UnsaveableCollection(
api_client=api_client,
keep_client=api_client.keep,
num_retries=num_retries,
storage_classes_desired=storage_classes)
+ # This is always enable_write=True because it never tries to
+ # save to the backend
super(TmpCollectionDirectory, self).__init__(
- parent_inode, inodes, api_client.config, collection)
+ parent_inode, inodes, api_client.config, True, collection)
self.collection_record_file = None
self.populate(self.mtime())
""".lstrip()
- def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False, storage_classes=None):
- super(MagicDirectory, self).__init__(parent_inode, inodes, api.config)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
+ super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
self.api = api
self.num_retries = num_retries
self.pdh_only = pdh_only
# If we're the root directory, add an identical by_id subdirectory.
if self.inode == llfuse.ROOT_INODE:
self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+ self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+ self.pdh_only))
def __contains__(self, k):
if k in self._entries:
if project[u'items_available'] == 0:
return False
e = self.inodes.add_entry(ProjectDirectory(
- self.inode, self.inodes, self.api, self.num_retries,
+ self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
project[u'items'][0], storage_classes=self.storage_classes))
else:
e = self.inodes.add_entry(CollectionDirectory(
- self.inode, self.inodes, self.api, self.num_retries, k))
+ self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
if e.update():
if k not in self._entries:
class TagsDirectory(Directory):
"""A special directory that contains as subdirectories all tags visible to the user."""
- def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
- super(TagsDirectory, self).__init__(parent_inode, inodes, api.config)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
+ super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
self.api = api
self.num_retries = num_retries
self._poll = True
self.merge(tags['items']+[{"name": n} for n in self._extra],
lambda i: i['name'],
lambda a, i: a.tag == i['name'],
- lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+ lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+ i['name'], poll=self._poll, poll_time=self._poll_time))
@use_counter
@check_update
to the user that are tagged with a particular tag.
"""
- def __init__(self, parent_inode, inodes, api, num_retries, tag,
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
poll=False, poll_time=60):
- super(TagDirectory, self).__init__(parent_inode, inodes, api.config)
+ super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
self.api = api
self.num_retries = num_retries
self.tag = tag
self.merge(taggedcollections['items'],
lambda i: i['head_uuid'],
lambda a, i: a.collection_locator == i['head_uuid'],
- lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+ lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
class ProjectDirectory(Directory):
"""A special directory that contains the contents of a project."""
- def __init__(self, parent_inode, inodes, api, num_retries, project_object,
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
poll=True, poll_time=3, storage_classes=None):
- super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config)
+ super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
self.api = api
self.num_retries = num_retries
self.project_object = project_object
def createDirectory(self, i):
if collection_uuid_pattern.match(i['uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+ return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
elif group_uuid_pattern.match(i['uuid']):
- return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time, self.storage_classes)
+ return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+ i, self._poll, self._poll_time, self.storage_classes)
elif link_uuid_pattern.match(i['uuid']):
if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+ return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
else:
return None
elif uuid_pattern.match(i['uuid']):
@use_counter
@check_update
def writable(self):
+ if not self._enable_write:
+ return False
with llfuse.lock_released:
if not self._current_user:
self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
@use_counter
@check_update
def mkdir(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
+
try:
with llfuse.lock_released:
c = {
@use_counter
@check_update
def rmdir(self, name):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
+
if name not in self:
raise llfuse.FUSEError(errno.ENOENT)
if not isinstance(self[name], CollectionDirectory):
@use_counter
@check_update
def rename(self, name_old, name_new, src):
+ if not self.writable():
+ raise llfuse.FUSEError(errno.EROFS)
+
if not isinstance(src, ProjectDirectory):
raise llfuse.FUSEError(errno.EPERM)
class SharedDirectory(Directory):
"""A special directory that represents users or groups who have shared projects with me."""
- def __init__(self, parent_inode, inodes, api, num_retries, exclude,
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
poll=False, poll_time=60, storage_classes=None):
- super(SharedDirectory, self).__init__(parent_inode, inodes, api.config)
+ super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
self.api = api
self.num_retries = num_retries
self.current_user = api.users().current().execute(num_retries=num_retries)
self.merge(contents.items(),
lambda i: i[0],
lambda a, i: a.uuid() == i[1]['uuid'],
- lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+ lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+ i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
except Exception:
_logger.exception("arv-mount shared dir error")
finally:
class FuseArvadosFile(File):
"""Wraps a ArvadosFile."""
- __slots__ = ('arvfile',)
+ __slots__ = ('arvfile', '_enable_write')
- def __init__(self, parent_inode, arvfile, _mtime):
+ def __init__(self, parent_inode, arvfile, _mtime, enable_write):
super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
self.arvfile = arvfile
+ self._enable_write = enable_write
def size(self):
with llfuse.lock_released:
return False
def writable(self):
- return self.arvfile.writable()
+ return self._enable_write and self.arvfile.writable()
def flush(self):
with llfuse.lock_released:
llfuse.close()
def make_mount(self, root_class, **root_kwargs):
+ enable_write = True
+ if 'enable_write' in root_kwargs:
+ enable_write = root_kwargs.pop('enable_write')
self.operations = fuse.Operations(
os.getuid(), os.getgid(),
api_client=self.api,
- enable_write=True)
+ enable_write=enable_write)
self.operations.inodes.add_entry(root_class(
- llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+ llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
llfuse.init(self.operations, self.mounttmp, [])
self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
self.llfuse_thread.daemon = True
class SanitizeFilenameTest(MountTestBase):
def test_sanitize_filename(self):
- pdir = fuse.ProjectDirectory(1, {}, self.api, 0, project_object=self.api.users().current().execute())
+ pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
acceptable = [
"foo.txt",
".foo",
@staticmethod
def _test_collection_custom_storage_classes(self, coll):
self.assertEqual(storage_classes_desired(coll), ['foo'])
+
+def _readonlyCollectionTestHelper(mounttmp):
+ f = open(os.path.join(mounttmp, 'thing1.txt'), 'rt')
+    # Verify that close() on a file opened from a read-only mount does not raise an error.
+ f.close()
+
+class ReadonlyCollectionTest(MountTestBase):
+ def setUp(self):
+ super(ReadonlyCollectionTest, self).setUp()
+ cw = arvados.collection.Collection()
+ with cw.open('thing1.txt', 'wt') as f:
+ f.write("data 1")
+ cw.save_new(owner_uuid=run_test_server.fixture("groups")["aproject"]["uuid"])
+ self.testcollection = cw.api_response()
+
+ def runTest(self):
+ settings = arvados.config.settings().copy()
+ settings["ARVADOS_API_TOKEN"] = run_test_server.fixture("api_client_authorizations")["project_viewer"]["api_token"]
+ self.api = arvados.safeapi.ThreadSafeApiCache(settings)
+ self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)
+
+ self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))
if err != nil {
return giveup("opening %q: %s", udir, err)
}
+ defer d.Close()
uuids, err := d.Readdirnames(0)
if err != nil {
return giveup("reading %q: %s", udir, err)
return fmt.Errorf("error creating directory %s: %s", bdir, err)
}
- tmpfile, tmperr := v.os.TempFile(bdir, "tmp"+loc)
- if tmperr != nil {
- return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, tmperr)
- }
-
bpath := v.blockPath(loc)
+ tmpfile, err := v.os.TempFile(bdir, "tmp"+loc)
+ if err != nil {
+ return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, err)
+ }
+ defer v.os.Remove(tmpfile.Name())
+ defer tmpfile.Close()
- if err := v.lock(ctx); err != nil {
+ if err = v.lock(ctx); err != nil {
return err
}
defer v.unlock()
n, err := io.Copy(tmpfile, rdr)
v.os.stats.TickOutBytes(uint64(n))
if err != nil {
- err = fmt.Errorf("error writing %s: %s", bpath, err)
- tmpfile.Close()
- v.os.Remove(tmpfile.Name())
- return err
+ return fmt.Errorf("error writing %s: %s", bpath, err)
}
- if err := tmpfile.Close(); err != nil {
- err = fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
- v.os.Remove(tmpfile.Name())
- return err
+ if err = tmpfile.Close(); err != nil {
+ return fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
}
// ext4 uses a low-precision clock and effectively backdates
// files by up to 10 ms, sometimes across a 1-second boundary,
v.os.stats.TickOps("utimes")
v.os.stats.Tick(&v.os.stats.UtimesOps)
if err = os.Chtimes(tmpfile.Name(), ts, ts); err != nil {
- err = fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
- v.os.Remove(tmpfile.Name())
- return err
+ return fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
}
- if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
- err = fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
- v.os.Remove(tmpfile.Name())
- return err
+ if err = v.os.Rename(tmpfile.Name(), bpath); err != nil {
+ return fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
}
return nil
}
fi
set -e
+    # Get the Go version we should use for bootstrapping
+ GO_VERSION=`grep 'const goversion =' $LOCAL_ARVADOS_ROOT/lib/install/deps.go |awk -F'"' '{print $2}'`
+
if test "$1" = localdemo -o "$1" = publicdemo ; then
BUILDTYPE=demo
else
fi
docker build --build-arg=BUILDTYPE=$BUILDTYPE $NO_CACHE \
+ --build-arg=go_version=$GO_VERSION \
--build-arg=arvados_version=$ARVADOS_BRANCH \
--build-arg=workbench2_version=$WORKBENCH2_BRANCH \
--build-arg=workdir=/tools/arvbox/lib/arvbox/docker \
"$LOCAL_ARVADOS_ROOT"
docker tag $FORCE arvados/arvbox-base:$GITHEAD arvados/arvbox-base:latest
docker build $NO_CACHE \
+ --build-arg=go_version=$GO_VERSION \
--build-arg=arvados_version=$ARVADOS_BRANCH \
--build-arg=workbench2_version=$WORKBENCH2_BRANCH \
-t arvados/arvbox-$BUILDTYPE:$GITHEAD \
build-essential ca-certificates git libpam0g-dev wget
ENV GOPATH /var/lib/gopath
+ARG go_version
-# Get Go 1.16.9
+# Get Go
RUN cd /usr/src && \
- wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
- tar xzf go1.16.9.linux-amd64.tar.gz && \
- ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
- ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
- ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
- ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+ wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+ tar xzf go${go_version}.linux-amd64.tar.gz && \
+ ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+ ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+ ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+ ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
# the --mount option requires the experimental syntax enabled (enables
# buildkit) on the first line of this file. This Dockerfile must also be built
build-essential ca-certificates git libpam0g-dev wget
ENV GOPATH /var/lib/gopath
+ARG go_version
-# Get Go 1.16.9
RUN cd /usr/src && \
- wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
- tar xzf go1.16.9.linux-amd64.tar.gz && \
- ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
- ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
- ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
- ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+ wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+ tar xzf go${go_version}.linux-amd64.tar.gz && \
+ ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+ ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+ ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+ ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
ARG arvados_version
RUN echo arvados_version is git commit $arvados_version