~/arvados/doc/user/cwl/bwa-mem$ arvados-cwl-runner --project-uuid zzzzz-j7d0g-p32bi47ogkjke11 --create-workflow bwa-mem.cwl
diff --git a/doc/user/cwl/cwl-style.html.textile.liquid b/doc/user/cwl/cwl-style.html.textile.liquid
index 303ae37e9e..911c9ba5a5 100644
--- a/doc/user/cwl/cwl-style.html.textile.liquid
+++ b/doc/user/cwl/cwl-style.html.textile.liquid
@@ -172,7 +172,7 @@ Workflows should always provide @DockerRequirement@ in the @hints@ or @requireme
h3. Build a reusable library of components
-Build a reusable library of components. Share tool wrappers and subworkflows between projects. Make use of and contribute to "community maintained workflows and tools":https://github.com/common-workflow-library and tool registries such as "Dockstore":http://dockstore.org .
+Share tool wrappers and subworkflows between projects. Make use of and contribute to "community maintained workflows and tools":https://github.com/common-workflow-library and tool registries such as "Dockstore":http://dockstore.org .
h3. Supply scripts as input parameters
@@ -208,7 +208,7 @@ h3. Getting the temporary and output directories
You can get the designated temporary directory using @$(runtime.tmpdir)@ in your CWL file, or from the @$TMPDIR@ environment variable in your script.
-Similarly, you can get the designated output directory using $(runtime.outdir), or from the @HOME@ environment variable in your script.
+Similarly, you can get the designated output directory using @$(runtime.outdir)@, or from the @HOME@ environment variable in your script.
h3. Specifying @ResourceRequirement@
@@ -234,3 +234,36 @@ steps:
coresMin: 2
tmpdirMin: 90000
{% endcodeblock %}
+
+h3. Importing data into Keep
+
+You can use HTTP URLs as File input parameters and @arvados-cwl-runner@ will download them to Keep for you:
+
+{% codeblock as yaml %}
+fastq1:
+ class: File
+ location: https://example.com/genomes/sampleA_1.fastq
+fastq2:
+ class: File
+ location: https://example.com/genomes/sampleA_2.fastq
+{% endcodeblock %}
+
+Files are downloaded and stored in Keep collections with HTTP header information stored in metadata. If a file was previously downloaded, @arvados-cwl-runner@ uses HTTP caching rules to decide if a file should be re-downloaded or not.
+
+The default behavior is to transfer the files on the client, prior to submitting the workflow run. This guarantees the data is available when the workflow is submitted. However, if data transfer is time consuming and you are submitting multiple workflow runs in a row, or the node submitting the workflow has limited bandwidth, you can use the @--defer-download@ option to have the data transfer performed by the workflow runner process on a compute node, after the workflow is submitted.
+
+@arvados-cwl-runner@ provides two additional options to control caching behavior.
+
+* @--varying-url-params@ will ignore the listed URL query parameters from any HTTP URLs when checking if a URL has already been downloaded to Keep.
+* @--prefer-cached-downloads@ will search Keep for the previously downloaded URL and use that if found, without checking the upstream resource. This means changes in the upstream resource won't be detected, but it also means the workflow will not fail if the upstream resource becomes inaccessible.
+
+One use of this is to import files from "AWS S3 signed URLs":https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html
+
+Here is an example usage. The use of @--varying-url-params=AWSAccessKeyId,Signature,Expires@ is especially relevant: it removes these parameters from the cached URL, which means that if a new signed URL for the same object is generated later, it can be found in the cache.
+
+{% codeblock as sh %}
+arvados-cwl-runner --defer-download \
+ --varying-url-params=AWSAccessKeyId,Signature,Expires \
+ --prefer-cached-downloads \
+ workflow.cwl params.yml
+{% endcodeblock %}
diff --git a/doc/user/cwl/images/crunchstat-summary-html.png b/doc/user/cwl/images/crunchstat-summary-html.png
index 488541b3a7..3832734e70 100644
Binary files a/doc/user/cwl/images/crunchstat-summary-html.png and b/doc/user/cwl/images/crunchstat-summary-html.png differ
diff --git a/doc/user/debugging/container-shell-access.html.textile.liquid b/doc/user/debugging/container-shell-access.html.textile.liquid
index 91347e66f2..9c24980049 100644
--- a/doc/user/debugging/container-shell-access.html.textile.liquid
+++ b/doc/user/debugging/container-shell-access.html.textile.liquid
@@ -9,6 +9,8 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
+{% include 'tutorial_expectations' %}
+
{% include 'notebox_begin' %}
To use this feature, your Arvados installation must be configured to allow container shell access. See "the install guide":{{site.baseurl}}/install/container-shell-access.html for more information.
diff --git a/doc/user/getting_started/setup-cli.html.textile.liquid b/doc/user/getting_started/setup-cli.html.textile.liquid
index 46ea770eff..18f675d04e 100644
--- a/doc/user/getting_started/setup-cli.html.textile.liquid
+++ b/doc/user/getting_started/setup-cli.html.textile.liquid
@@ -11,10 +11,35 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Many operations in Arvados can be performed using either the web Workbench or through command line tools. Some operations can only be done using the command line.
-To use the command line tools, you can either log into an Arvados-managed VM instance where those tools are pre-installed, or install the Arvados tools on your own system.
+To use the command line tools, you can either log into an Arvados virtual machine where those tools are pre-installed, or install the Arvados tools on your own system.
-To log into an Arvados-managed VM, see instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html .
+h2. Option 1: Using an Arvados virtual machine
-To install the Arvados tools on your own system, you should install the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html (requires Ruby) and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html (requires Python). You may also want to install "arvados-cwl-runner":{{site.baseurl}}/sdk/python/arvados-cwl-runner.html to submit workflows and "arvados-fuse":{{site.baseurl}}/sdk/python/arvados-fuse.html to mount keep as a filesystem.
+This is the command line interface we recommend for most day-to-day work, because the tools are all preinstalled and preconfigured for you. You can log in to any virtual machine where you have permission by using:
-Once you are logged in or have command line tools installed, see "getting an API token":{{site.baseurl}}/user/reference/api-tokens.html and "check your environment":{{site.baseurl}}/user/getting_started/check-environment.html .
+* "the Webshell client":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html accessible through Arvados Workbench
+* "Unix SSH clients":{{site.baseurl}}/user/getting_started/ssh-access-unix.html
+* "Windows SSH clients":{{site.baseurl}}/user/getting_started/ssh-access-windows.html
+
+h2. Option 2: Installing Arvados tools on your own system
+
+This option gives you more flexibility in your work, but takes more time to set up.
+
+h3. Configure Arvados package repositories for your system
+
+Doing this isn't strictly required for most tools, but will streamline the installation process. Follow the "Arvados package repository instructions":{{site.baseurl}}/install/packages.html.
+
+h3. Install individual tool packages
+
+Here are the client packages you can install on your system. You can skip any you don't want or need except for the Python SDK (most other tools require it).
+
+* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html: This provides an Arvados API client in Python, as well as low-level command line tools.
+* "Command-line SDK":{{site.baseurl}}/sdk/cli/install.html: This provides the high-level @arv@ command and user interface to the Arvados API.
+* "FUSE Driver":{{site.baseurl}}/sdk/fuse/install.html: This provides the @arv-mount@ command and FUSE driver that lets you access Keep using standard Linux filesystem tools.
+* "CWL Runner":{{site.baseurl}}/sdk/python/arvados-cwl-runner.html: This provides the @arvados-cwl-runner@ command to register and run workflows in Crunch.
+* "crunchstat-summary":{{site.baseurl}}/user/cwl/crunchstat-summary.html: This tool provides performance reports for Crunch containers.
+* "arvados-client":{{site.baseurl}}/user/debugging/container-shell-access.html: This tool provides subcommands for inspecting Crunch containers, both interactively while they're running and after they've finished.
+
+h2. After Installation: Check your environment
+
+Once you are logged in or have command line tools installed, move on to "getting an API token":{{site.baseurl}}/user/reference/api-tokens.html and "checking your environment":{{site.baseurl}}/user/getting_started/check-environment.html.
diff --git a/doc/user/getting_started/ssh-access-unix.html.textile.liquid b/doc/user/getting_started/ssh-access-unix.html.textile.liquid
index 80cb391314..b131b5b36d 100644
--- a/doc/user/getting_started/ssh-access-unix.html.textile.liquid
+++ b/doc/user/getting_started/ssh-access-unix.html.textile.liquid
@@ -37,9 +37,9 @@ Enter same passphrase again:
* @-t@ specifies the key type (must be "rsa")
* @-C@ specifies a comment (to remember which account the key is associated with)
-We strongly recommend that you protect your key with a passphrase. This means that when the key is used, you will be required to enter the passphrase. However, unlike logging into remote system using a password, the passphrase is never sent over the network, it is only used to decrypt your private key.
+We strongly recommend that you protect your key with a passphrase. This means that when the key is used, you will be required to enter the passphrase. However, unlike logging into remote system using a password, the passphrase is never sent over the network; it is only used to decrypt your private key locally.
-Display the contents of @~/.ssh/id_rsa.pub@ (this is your public key) using @cat@ and then copy it onto the clipboard:
+Display the contents of @~/.ssh/id_rsa.pub@ (this is your public key) using @cat@, and then copy it onto the clipboard. The content of the public key may look similar to the following example:
$ cat ~/.ssh/id_rsa.pub
@@ -47,6 +47,8 @@ ssh-rsa AAAAB3NzaC1ycEDoNotUseExampleKeyDoNotUseExampleKeyDoNotUseExampleKeyDoNo
+* The above is only a specimen; it cannot be used as a valid public key.
+
Now you can set up @ssh-agent@ (next) or proceed with "adding your key to the Arvados Workbench.":#workbench
h3. Set up ssh-agent (optional)
@@ -55,13 +57,13 @@ If you find you are entering your passphrase frequently, you can use @ssh-agent@
notextile. $ ssh-add -l
-If you get the error "Could not open a connection to your authentication agent" you will need to run @ssh-agent@ with the following command:
+If you get the error "_Could not open a connection to your authentication agent_", you will need to run @ssh-agent@ with the following command:
-notextile. $ eval $(ssh-agent -s)
+notextile. $ eval "$(ssh-agent -s)"
-@ssh-agent -s@ prints out values for environment variables SSH_AUTH_SOCK and SSH_AGENT_PID and then runs in the background. Using "eval" on the output as shown here causes those variables to be set in the current shell environment so that subsequent calls to SSH can discover how to access the agent process.
+@ssh-agent -s@ runs an agent process in the background to hold your SSH credentials, and it prints out the values of environment variables @SSH_AUTH_SOCK@ and @SSH_AGENT_PID@. By applying the shell builtin @eval@ to this output, as we show here using the shell command-substitution syntax, we set those variables in the current shell environment. In this way, subsequent invocations of @ssh@ in this shell session will be able to access the agent process for the credentials without asking you each time.
-After running @ssh-agent@, or if @ssh-add -l@ prints "The agent has no identities", add your key using the following command. The passphrase to decrypt the key is the same used to protect the key when it was created with @ssh-keygen@:
+After running @ssh-agent@, or if @ssh-add -l@ prints "_The agent has no identities_", add your private key to the SSH agent using the following command. The passphrase to decrypt the key is the same one used to protect the key when it was created with @ssh-keygen@:
$ ssh-add
@@ -70,7 +72,7 @@ Identity added: /home/example/.ssh/id_rsa (/home/example/.ssh/id_rsa)
-When everything is set up, @ssh-add -l@ should yield output that looks something like this:
+When everything is set up, @ssh-add -l@ should yield output that looks like this:
$ ssh-add -l
@@ -82,29 +84,29 @@ When everything is set up, @ssh-add -l@ should yield output that looks something
h3. Connecting directly
-If the VM is available on the public Internet (or you are on the same private network as the VM) you can connect directly with @ssh@. You can probably copy-and-paste the text from *Command line* column directly into a terminal.
+If the VM is available on the public Internet (or you are on the same private network as the VM), you can connect directly with @ssh@. You can copy-and-paste the text from the *Command line* column (see the screenshot above) directly into a shell session.
-Use the following example command to connect as _you_ to the _shell.ClusterID.example.com_ VM instance. Replace *you@shell.ClusterID.example.com
* at the end of the following command with your *login* and *hostname* from Workbench.
+Use the following example command to connect, as the user "_you_" to the VM instance at the hostname "_shell.ClusterID.example.com_". Replace *you@shell.ClusterID.example.com
* at the end of the following command with your actual *login* and *hostname* from Workbench.
notextile. $ ssh you@shell.ClusterID.example.com
h3. Connecting through switchyard
-Some Arvados installations use "switchyard" to isolate shell VMs from the public Internet.
+Some Arvados installations use "switchyard" to isolate shell VMs from the public Internet. In such cases, you cannot log in directly to virtual machines over the public Internet. Instead, you log into a "switchyard" server and then tell the switchyard which virtual machine you want to connect to.
Use the following example command to connect to the _shell_ VM instance as _you_. Replace *you@shell
* at the end of the following command with your *login* and *hostname* from Workbench:
notextile. $ ssh -o "ProxyCommand ssh -p2222 turnout@switchyard.ClusterID.example.com -x -a shell " -x you@shell
-This command does several things at once. You usually cannot log in directly to virtual machines over the public Internet. Instead, you log into a "switchyard" server and then tell the switchyard which virtual machine you want to connect to.
+This command does several things at once.
* @-o "ProxyCommand ..."@ configures SSH to run the specified command to create a proxy and route your connection through it.
* @-p2222@ specifies that the switchyard is running on non-standard port 2222.
* turnout@switchyard.{{ site.arvados_api_host }}
specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchyard server that will proxy our connection to the VM.
* @-x@ tells SSH not to forward your X session to the switchyard.
* @-a@ tells SSH not to forward your ssh-agent credentials to the switchyard.
-* *@shell@* is the name of the VM that we want to connect to. This is sent to the switchyard server as if it were an SSH command, and the switchyard server connects to the VM on our behalf.
-* After the ProxyCommand section, we repeat @-x@ to disable X session forwarding to the virtual machine.
+* *@shell@* is the host name of the VM that we want to connect to. In summary, the string inside the quotation marks is sent to the switchyard server, as if it were an SSH command, and the switchyard server connects to the VM on our behalf.
+* After the @ProxyCommand@ section, we repeat @-x@ to disable X session forwarding to the virtual machine.
* Finally, *you@shell
* specifies your login name and repeats the hostname of the VM. The username can be found in the *logins* column in the VMs Workbench page, discussed in the previous section.
You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
diff --git a/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid b/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid
index 0aeabab11b..33168dda3c 100644
--- a/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid
+++ b/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid
@@ -19,10 +19,10 @@ Webshell gives you access to an arvados virtual machine from your browser with n
Some Arvados clusters may not have webshell set up. If you do not see a "Log in" button or "web shell" column, you will have to follow the "Unix":ssh-access-unix.html or "Windows":ssh-access-windows.html @ssh@ instructions.
{% include 'notebox_end' %}
-In the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Virtual machines* to see the list of virtual machines you can access. If you do not have access to any virtual machines, please click on Send request for shell access (if present) or contact your system administrator. For the Arvados Playground, this is "info@curii.com":mailto:info@curii.com .
+In the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the _Account Management_ menu, and click on the menu item *Virtual machines* to see the list of virtual machines you can access. If you do not have access to any virtual machines, please click on SEND REQUEST FOR SHELL ACCESS (if present) or contact your system administrator. For the Arvados Playground, this is "info@curii.com":mailto:info@curii.com .
-Each row in the Virtual Machines panel lists the hostname of the VM, along with a Log in as *you*
button under the column "Web shell". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.
+Each row in the Virtual Machines panel lists the hostname of the VM, along with a Log in as [your name] button under the column "*Web shell*". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/vm-access-with-webshell.png!
+!{width: 100%;}{{ site.baseurl }}/images/vm-access-with-webshell.png!
You are now ready to work in your Arvados VM.
diff --git a/doc/user/getting_started/workbench.html.textile.liquid b/doc/user/getting_started/workbench.html.textile.liquid
index 7091e31eae..d96280d30a 100644
--- a/doc/user/getting_started/workbench.html.textile.liquid
+++ b/doc/user/getting_started/workbench.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
{% include 'notebox_begin' %}
-This guide covers the classic Arvados Workbench web application, sometimes referred to as "Workbench 1". There is also a new Workbench web application under development called "Workbench 2". This guide will be updated to cover "Workbench 2" in the future. See "Workbench 2 migration":{{site.baseurl}}/user/topics/workbench-migration.html for more information.
+This guide covers the modern Arvados Workbench web application, which may be referred to as "Workbench 2" to distinguish it from the previous Arvados Workbench web application ("Workbench 1"). Documentation for the classic Workbench can be found in "older versions of the user guide":https://doc.arvados.org/v2.6/user/getting_started/workbench.html . See also "Workbench 2 migration":{{site.baseurl}}/user/topics/workbench-migration.html for more information.
{% include 'notebox_end' %}
You can access the Arvados Workbench used in this guide using this link:
@@ -27,6 +27,6 @@ h2. Logging in
You will be asked to log in. Arvados uses only your name and email address for identification, and will never access any personal information. If you are accessing Arvados for the first time, the Workbench may indicate your account status is *New / inactive*. If this is the case, contact the administrator of the Arvados instance to request activation of your account.
-Once your account is active, logging in to the Workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in the Arvados instance. You are now ready to "upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html or "run your first workflow.":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html
+Once your account is active, logging in to the Workbench will present you with an overview of your Home Projects. You are now ready to "upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html or "run your first workflow.":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/workbench-dashboard.png!
+!{width: 100%;}{{ site.baseurl }}/images/workbench-first-page.png!
diff --git a/doc/user/reference/api-tokens.html.textile.liquid b/doc/user/reference/api-tokens.html.textile.liquid
index 6afc20bf4f..4c35530e60 100644
--- a/doc/user/reference/api-tokens.html.textile.liquid
+++ b/doc/user/reference/api-tokens.html.textile.liquid
@@ -15,11 +15,11 @@ Access the Arvados Workbench using this link: "{{site.arvados_workbench_host}}/"
Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
-In the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Current token*, which lists your current token and instructions to set up your environment.
+In the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the _Account Management_ menu. Then, in the pop-up menu, click on the menu item *Get API token*. This will open a dialog box that lists your current token and the instructions for setting up your environment.
h2. Setting environment variables
-The *Current token* page, accessed using the dropdown menu icon in the upper right corner of the top navigation menu, includes a command you may copy and paste directly into the shell. It will look something as the following.
+In the dialog box opened after clicking on the *Get API token* menu item, there is a sequence of commands you may copy and paste directly into the shell. It will look something like the following.
bc. HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
export ARVADOS_API_TOKEN=2jv9346o396exampledonotuseexampledonotuseexes7j1ld
@@ -38,9 +38,12 @@ $ echo "ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.conf
+* The output-redirection operator @>@ in the first command will cause the target file @~/.config/arvados/settings.conf@ to be created anew, wiping out the content of any existing file at that path.
+* The @>>@ operator in the second command appends to the target file.
+
h2. .bashrc
-Alternately, you may add the declarations of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system on which you intend to use the Arvados client. If you have already put the variables into the environment following the instructions above, you can use these commands to append the environment variables to your @~/.bashrc@:
+Alternately, you may add the definitions of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system where you intend to use the Arvados client. If you have already put the variables into the environment following the instructions above, you can use the commands below to append to your @~/.bashrc@, which tells Bash to export them as environment variables in newly-started interactive shell sessions:
$ echo "export ARVADOS_API_HOST=$ARVADOS_API_HOST" >> ~/.bashrc
diff --git a/doc/user/topics/arv-copy.html.textile.liquid b/doc/user/topics/arv-copy.html.textile.liquid
index 15c9623224..a05620d62d 100644
--- a/doc/user/topics/arv-copy.html.textile.liquid
+++ b/doc/user/topics/arv-copy.html.textile.liquid
@@ -15,7 +15,7 @@ This tutorial describes how to copy Arvados objects from one cluster to another
h2. arv-copy
-@arv-copy@ allows users to copy collections, workflow definitions and projects from one cluster to another.
+@arv-copy@ allows users to copy collections, workflow definitions and projects from one cluster to another. You can also use @arv-copy@ to import resources from HTTP URLs into Keep.
For projects, @arv-copy@ will copy all the collections workflow definitions owned by the project, and recursively copy subprojects.
@@ -71,10 +71,14 @@ Additionally, if you need to specify the storage classes where to save the copie
h3. How to copy a workflow
+Copying workflows requires @arvados-cwl-runner@ to be available in your @$PATH@.
+
We will use the uuid @jutro-7fd4e-mkmmq53m1ze6apx@ as an example workflow.
+Arv-copy will infer that the source cluster is @jutro@ from the object uuid, and that the destination cluster is @pirca@ from @--project-uuid@.
+
-~$ arv-copy --src jutro --dst pirca --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx
+~$ arv-copy --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx
ae480c5099b81e17267b7445e35b4bc7+180: 23M / 23M 100.0%
2463fa9efeb75e099685528b3b9071e0+438: 156M / 156M 100.0%
jutro-4zz18-vvvqlops0a0kpdl: 94M / 94M 100.0%
@@ -91,8 +95,10 @@ h3. How to copy a project
We will use the uuid @jutro-j7d0g-xj19djofle3aryq@ as an example project.
+Arv-copy will infer that the source cluster is @jutro@ from the source project uuid, and that the destination cluster is @pirca@ from @--project-uuid@.
+
-~$ peteramstutz@shell:~$ arv-copy --project-uuid pirca-j7d0g-lr8sq3tx3ovn68k jutro-j7d0g-xj19djofle3aryq
+~$ arv-copy --project-uuid pirca-j7d0g-lr8sq3tx3ovn68k jutro-j7d0g-xj19djofle3aryq
2021-09-08 21:29:32 arvados.arv-copy[6377] INFO:
2021-09-08 21:29:32 arvados.arv-copy[6377] INFO: Success: created copy with uuid pirca-j7d0g-ig9gvu5piznducp
@@ -101,3 +107,23 @@ We will use the uuid @jutro-j7d0g-xj19djofle3aryq@ as an example project.
The name and description of the original project will be used for the destination copy. If a project already exists with the same name, collections and workflow definitions will be copied into the project with the same name.
If you would like to copy the project but not its subproject, you can use the @--no-recursive@ flag.
+
+h3. Importing HTTP resources to Keep
+
+You can also use @arv-copy@ to copy the contents of an HTTP URL into Keep. When you do this, Arvados keeps track of the original URL the resource came from. This allows you to refer to the resource by its original URL in Workflow inputs, but actually read from the local copy in Keep.
+
+
+~$ arv-copy --project-uuid tordo-j7d0g-lr8sq3tx3ovn68k https://example.com/index.html
+tordo-4zz18-dhpb6y9km2byb94
+2023-10-06 10:15:36 arvados.arv-copy[374147] INFO: Success: created copy with uuid tordo-4zz18-dhpb6y9km2byb94
+
+
+
+In addition, when importing from HTTP URLs, you may provide a different cluster than the destination in @--src@. This tells @arv-copy@ to search the other cluster for a collection associated with that URL, and if found, copy the collection from that cluster instead of downloading from the original URL.
+
+The following @arv-copy@ command line options affect the behavior of HTTP import.
+
+table(table table-bordered table-condensed).
+|_. Option |_. Description |
+|==--varying-url-params== VARYING_URL_PARAMS|A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.|
+|==--prefer-cached-downloads==|If an HTTP URL is found in Keep, skip the upstream URL freshness check (will not notice if the upstream has changed, but also will not error if upstream is unavailable).|
diff --git a/doc/user/topics/arvados-sync-external-sources.html.textile.liquid b/doc/user/topics/arvados-sync-external-sources.html.textile.liquid
index 0ec0098f05..53a79ea23e 100644
--- a/doc/user/topics/arvados-sync-external-sources.html.textile.liquid
+++ b/doc/user/topics/arvados-sync-external-sources.html.textile.liquid
@@ -65,6 +65,8 @@ Users can be identified by their email address or username: the tool will check
Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":{{ site.baseurl }}/admin/group-management.html.
+When using @arvados-sync-groups@, consider setting @Users.CanCreateRoleGroups: false@ in your "cluster configuration":{{site.baseurl}}/admin/config.html to prevent users from creating additional groups.
+
h2. Options
The following command line options are supported:
diff --git a/doc/user/topics/workbench-migration.html.textile.liquid b/doc/user/topics/workbench-migration.html.textile.liquid
index 9a36435eac..7ca04ffefa 100644
--- a/doc/user/topics/workbench-migration.html.textile.liquid
+++ b/doc/user/topics/workbench-migration.html.textile.liquid
@@ -9,17 +9,11 @@ Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Arvados is in the process of migrating from the classic web application, referred to as "Workbench 1", to a completely new web application, referred to as "Workbench 2".
+Beginning in version 2.7, Arvados now defaults to a new web application, referred to as "Workbench 2". This is a major step in the migration from the classic web application, referred to as "Workbench 1". Workbench 1 should be considered deprecated and support for the Workbench 1 application will be dropped in a future Arvados release.
!{width: 90%}{{ site.baseurl }}/images/wb2-example.png!
-Workbench 2 is the new Workbench web application that will, over time, replace Workbench 1. Workbench 2 is being built based on user feedback, and it is approaching feature parity with Workbench 1. Workbench 2 has a modern look and feel and offers several advanced features and performance enhancements. Arvados clusters typically have both Workbench applications installed and have a dropdown menu option in the user menu to switch between versions.
-
-!{{ site.baseurl }}/images/switch-to-wb2.png!
-
-Workbench 2 is stable and recommended for general use, but still lacks some features available in the classic Workbench 1 application. When necessary, you can easily switch back:
-
-!{{ site.baseurl }}/images/switch-to-wb1.png!
+Workbench 2 is the new Workbench web application that replaces Workbench 1. Workbench 2 is being built based on user feedback, and has feature parity with Workbench 1. Workbench 2 has a modern look and feel and offers many advanced features and performance enhancements over the previous Workbench application.
Some major improvements of Workbench 2 include:
diff --git a/doc/user/tutorials/add-new-repository.html.textile.liquid b/doc/user/tutorials/add-new-repository.html.textile.liquid
index e28b961238..6046e7d14b 100644
--- a/doc/user/tutorials/add-new-repository.html.textile.liquid
+++ b/doc/user/tutorials/add-new-repository.html.textile.liquid
@@ -24,19 +24,19 @@ Before you start using Git and arvados repositories, you should do some basic co
h2. Add "tutorial" repository
-On the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*.
+On the Arvados Workbench, click on the dropdown menu icon (Account Management) in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*.
-In the *Repositories* page, you will see the *Add new repository* button.
+In the *Repositories* page, you will see the + NEW REPOSITORY button.
-!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/repositories-panel.png!
+!{width: 100%;}{{ site.baseurl }}/images/repositories-panel.png!
-Click the *Add new Repository* button to open the popup to add a new arvados repository. You will see a text box where you can enter the name of the repository. Enter *tutorial* in this text box and click on *Create*.
+Click the + NEW REPOSITORY button to open the popup to add a new Arvados repository. You will see a text box where you can enter the name of the repository. Enter *tutorial* in this text box and click on *Create*.
{% include 'notebox_begin' %}
The name you enter here must begin with a letter and can only contain alphanumeric characters.
{% include 'notebox_end' %}
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/add-new-repository.png!
+!{width: 100%;}{{ site.baseurl }}/images/add-new-repository.png!
This will create a new repository with the name @$USER/tutorial@. It can be accessed using the URL https://git.{{ site.arvados_api_host }}/$USER/tutorial.git
or git@git.{{ site.arvados_api_host }}:$USER/tutorial.git
diff --git a/doc/user/tutorials/git-arvados-guide.html.textile.liquid b/doc/user/tutorials/git-arvados-guide.html.textile.liquid
index a552e4ee00..a4ac2a5795 100644
--- a/doc/user/tutorials/git-arvados-guide.html.textile.liquid
+++ b/doc/user/tutorials/git-arvados-guide.html.textile.liquid
@@ -24,7 +24,7 @@ Before you start using Git, you should do some basic configuration (you only nee
~$ git config --global user.email $USER@example.com
-On the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*. In the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the *name* column. Next to *name* is the column *URL*. Copy the *URL* value associated with your repository. This should look like https://git.{{ site.arvados_api_host }}/$USER/tutorial.git
. Alternatively, you can use git@git.{{ site.arvados_api_host }}:$USER/tutorial.git
+On the Arvados Workbench, click on the dropdown menu icon in the upper right corner of the top navigation menu to access the Account Management menu, and click on the menu item *Repositories*. In the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the *name* column. Next to *name* is the column *URL*. Copy the *URL* value associated with your repository. This should look like https://git.{{ site.arvados_api_host }}/$USER/tutorial.git
. Alternatively, you can use git@git.{{ site.arvados_api_host }}:$USER/tutorial.git
Next, on the Arvados virtual machine, clone your Git repository:
diff --git a/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid b/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid
index 9ddec04f5e..234458c82e 100644
--- a/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid
+++ b/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid
@@ -22,9 +22,9 @@ h2(#delete-collection). Trashing (deleting) collections
A collection can be trashed using workbench or the arv command line tool.
-h3. Trashing a collection using workbench
+h3. Trashing a collection using Workbench
-To trash a collection using workbench, go to the Data collections tab in the project, and use the trash icon for this collection row.
+To trash a collection using Workbench, open the ⋮ action menu for the collection, and select *Move to trash*. You can do this from the collection page directly, or from the project listing that contains the collection.
h3. Trashing a collection using arv command line tool
@@ -36,11 +36,11 @@ h2(#trash-recovery). Recovering trashed collections
A collection can be untrashed / recovered using workbench or the arv command line tool.
-h3. Untrashing a collection using workbench
+h3. Untrashing a collection using Workbench
-To untrash a collection using workbench, go to trash page on workbench by clicking on the "Trash" icon in the top navigation in workbench and use the recycle icon or selection dropdown option.
+To untrash a collection using Workbench, open the *Trash* page from the left navigation menu. For each collection in this listing, you can press the *Restore* button on the far right to untrash it. You can also open a collection to review its contents. From that collection page, you can open the ⋮ action menu and select *Restore* to untrash the collection.
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/trash-button-topnav.png!
+!{width: 80%}{{ site.baseurl }}/images/trash-buttons.png!
h3. Untrashing a collection using arv command line tool
diff --git a/doc/user/tutorials/tutorial-keep-get.html.textile.liquid b/doc/user/tutorials/tutorial-keep-get.html.textile.liquid
index 05924f8475..5fa31970c4 100644
--- a/doc/user/tutorials/tutorial-keep-get.html.textile.liquid
+++ b/doc/user/tutorials/tutorial-keep-get.html.textile.liquid
@@ -12,36 +12,34 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Arvados Data collections can be downloaded using either the arv commands or using Workbench.
# "*Download using Workbench*":#download-using-workbench
-# "*Sharing collections*":#download-shared-collection
+# "*Creating a special download URL for a collection*":#download-shared-collection
# "*Download using command line tools*":#download-using-arv
h2(#download-using-workbench). Download using Workbench
You can also download Arvados data collections using the Workbench.
-Visit the Workbench *Dashboard*. Click on *Projects* dropdown menu in the top navigation menu, select your *Home* project. You will see the *Data collections* tab, which lists the collections in this project.
+When you visit a project in Workbench (for instance, the *Home Projects* or any projects under it), the collections will show up on the project details page, with "_Data collection_" in the *Type* column.
-You can access the contents of a collection by clicking on the * Show* button next to the collection. This will take you to the collection's page. Using this page you can see the collection's contents, and download individual files.
+Clicking on a collection will bring you to its details page. There, the lower panel acts like a file manager where you can navigate to or search for files, select them for actions, and download them.
-You can now download the collection files by clicking on the button(s).
+To download a file, simply click on the file, or bring up the context menu using right-click or the triple-dot button on its row, and then select the menu item *Download*.
-h2(#download-shared-collection). Sharing collections
+h2(#download-shared-collection). Creating a special download URL for a collection
-h3. Sharing with other Arvados users
+To share a collection with users that do not have an account on your Arvados cluster, locate the collection and then go to the *Sharing settings* dialog box as described above. There, select the *SHARING URLS* tab.
-Collections can be shared with other users on the Arvados cluster by sharing the parent project. Navigate to the parent project using the "breadcrumbs" bar, then click on the *Sharing* tab. From the sharing tab, you can choose which users or groups to share with, and their level of access.
+You can then generate a new sharing URL using the CREATE SHARING URL button, with the option to set an expiration time for the URL. You can then copy the URL to the clipboard for sharing with others. To revoke (that is, delete) a sharing URL, click on the cross icon beside it.
-h3. Creating a special download URL
+!{width: 80%}{{ site.baseurl }}/images/sharing-collection-url.png!_The_ *SHARING URLS* _tab in the_ *Sharing settings* _dialog box, showing the created URL with an expiration time_
-To share a collection with users that do not have an account on your Arvados cluster, visit the collection page using Workbench as described in the above section. Once on this page, click on the Create sharing link button.
+Any user with the sharing URL can download this collection by simply accessing this URL using a browser. It will present a downloadable version of the collection as shown below.
-This will create a sharing link for the collection as shown below. You can copy the sharing link in this page and share it with other users.
-
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/shared-collection.png!
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/download-shared-collection.png!
-A user with this url can download this collection by simply accessing this url using browser. It will present a downloadable version of the collection as shown below.
+When a collection is being shared by URL, in the *WITH USERS/GROUPS* tab of *Sharing settings*, the following message will appear if *General access* is Private: _Although there aren't specific permissions set, this is publicly accessible via Sharing URL(s)._
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/download-shared-collection.png!
+* *Note:* Sharing by URL is specific to collections. Projects or individual files cannot be shared in this way.
h2(#download-using-arv). Download using command line tools
diff --git a/doc/user/tutorials/tutorial-keep.html.textile.liquid b/doc/user/tutorials/tutorial-keep.html.textile.liquid
index 21efc475c5..1832a1530e 100644
--- a/doc/user/tutorials/tutorial-keep.html.textile.liquid
+++ b/doc/user/tutorials/tutorial-keep.html.textile.liquid
@@ -12,40 +12,40 @@ SPDX-License-Identifier: CC-BY-SA-3.0
Arvados Data collections can be uploaded using either Workbench or the @arv-put@ command line tool.
# "*Upload using Workbench*":#upload-using-workbench
-# "*Creating projects*":#creating-projects
# "*Upload using command line tool*":#upload-using-command
h2(#upload-using-workbench). Upload using Workbench
-To upload using Workbench, visit the Workbench *Dashboard*. Click on *Projects* dropdown menu in the top navigation menu and select your *Home* project or any other project of your choosing. You will see the *Data collections* tab for this project, which lists the collections in this project.
+To upload using Workbench, first identify the project to upload the files into. This is done by browsing your projects in the navigation menu on the left, or by searching for the project using the search field on the top.
-To upload files into a new collection, click on *Add data* dropdown menu and select *Upload files from my computer*.
+Having navigated to the project, click on the + NEW button in the top-left corner. In the pop-up menu, select the item *New collection*.
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/upload-using-workbench.png!
+ !{width: 80%;}{{ site.baseurl }}/images/add-new-collection-wb2.png! _Creating a new collection in the project "WGS Processing Tutorial"_
- This will create a new empty collection in your chosen project and will take you to the *Upload* tab for that collection.
+In the dialog box that follows, you will be prompted to create a new collection in your chosen project. Here, the *Collection Name* field is required. After entering the name for this new collection (and optionally other fields), you have the choice to create it with new file uploads -- by drag-and-drop into the *Files* area or with the traditional file-upload dialog opened by your browser.
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/upload-tab-in-new-collection.png!
+!{width: 100%;}{{ site.baseurl }}/images/new-collection-modal-wb2.png!_Providing the new collection with a name (required). Optionally, you can upload files in this step._
-Click on the *Browse...* button and select the files you would like to upload. Selected files will be added to a list of files to be uploaded. After you are done selecting files to upload, click on the * Start* button to start upload. This will start uploading files to Arvados and Workbench will show you the progress bar. When upload is completed, you will see an indication to that effect.
+You can then click on the CREATE A COLLECTION button and proceed to the newly-created collection's page. If you don't upload any data when creating the collection, the new collection will be empty, and you can upload files into it later.
-!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/files-uploaded.png!
+!{width: 100%;}{{ site.baseurl }}/images/newly-created-collection-empty-wb2.png!_The newly-created collection without any files yet._
-*Note:* If you leave the collection page during the upload, the upload process will be aborted and you will need to upload the files again.
+In the FILES panel, there is a button labeled UPLOAD DATA . Click on it, and you will be prompted to upload files by drag-and-drop or the file-selection dialog opened by your browser.
-*Note:* You can also use the Upload tab to add additional files to an existing collection.
+The files you choose to upload will then be displayed, and you can review them before clicking on the UPLOAD DATA button to initiate the actual file transfer.
-notextile.
+
!{width: 100%;}{{ site.baseurl }}/images/upload-data-prompt-with-files-wb2.png!_Selecting the files to upload_
-h2(#creating-projects). Creating projects
+Once the file upload completes, you will be notified by a message, and the files will appear under the
FILES panel shortly.
-Files are organized into Collections, and Collections are organized by Projects.
+
!{width: 100%;}{{ site.baseurl }}/images/upload-data-progress-wb2.png!_Upload status being displayed, with the files to appear shortly_
-Click on *Projects*
→ *Add a new project* to add a top level project.
+*Note:* If you leave the collection page during the upload, the upload process will be aborted and you will need to upload the files again.
-To create a subproject, navigate to the parent project, and click on
*Add a subproject*.
+*Note:* You can also use the
UPLOAD DATA button to add additional files to an existing collection.
+
+notextile.
-See "Sharing collections":tutorial-keep-get.html#download-shared-collection for information about sharing projects and collections with other users.
h2(#upload-using-command). Upload using command line tool
@@ -85,12 +85,12 @@ In both examples, the @arv-put@ command created a collection. The first collecti
h3. Locate your collection in Workbench
-Visit the Workbench *Dashboard*. Click on *Projects* dropdown menu in the top navigation menu, select your *Home* project. Your newly uploaded collection should appear near the top of the *Data collections* tab. The collection name printed by @arv-put@ will appear under the *name* column.
+Visit the Workbench and go to your *Home Projects*. Your newly uploaded collection should appear in the main panel. The collection name printed by @arv-put@ will appear under the *Name* column, and its *Type* will be "_Data collection_".
-To move the collection to a different project, check the box at the left of the collection row. Pull down the *Selection...* menu near the top of the page tab, and select *Move selected...* button. This will open a dialog box where you can select a destination project for the collection. Click a project, then finally the Move button.
+Clicking on the collection's name will lead you to its Workbench page, where you can see the collection's contents and download individual files.
-!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/workbench-move-selected.png!
+To move the collection to a different project, locate the collection and right-click on it. This will bring up a context menu with *Move to*. Click on this item, and you will see a dialog box where you can select the target project to move this collection to, by search or navigation. This context menu is also available from the triple-dot button in the project/collection listing or the collection details page.
-Click on the * Show* button next to the collection's listing on a project page to go to the Workbench page for your collection. On this page, you can see the collection's contents, download individual files, and set sharing options.
+!{width: 80%;}{{ site.baseurl }}/images/workbench-move-wb2.png! _Context menu with the_ *Move to* _item_
notextile.
diff --git a/doc/user/tutorials/tutorial-projects.html.textile.liquid b/doc/user/tutorials/tutorial-projects.html.textile.liquid
new file mode 100644
index 0000000000..b4dc9edea3
--- /dev/null
+++ b/doc/user/tutorials/tutorial-projects.html.textile.liquid
@@ -0,0 +1,41 @@
+---
+layout: default
+navsection: userguide
+title: "Organizing data"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Projects and Collections
+
+In Arvados, files are organized into "collections", and collections are organized by "project".
+
+Only collections can contain files. A collection is a distinct database record identified by a universal unique id (UUID). Arvados maintains a history of changes to the collection. Every collection version has an immutable identifier called a "portable data hash" which is computed from the file content of the collection. This can be used to refer to the immutable file content independently of the collection UUID. If two collections have the same portable data hash, they have the same file content.
+
+Projects contain collections, workflows and workflow runs, and other projects (subprojects). Both collections and projects can have user-provided metadata.
+
+Projects are the main unit of organization and sharing. See "Sharing collections":#sharing-projects for information about sharing projects and collections with other users.
+
+h2(#creating-projects). Creating a project
+
+When you have navigated to any existing project, clicking on
+ NEW → *New project* will prompt you to create a new subproject under the current project.
+
+If you're at the top-level
*Home Projects*, a new top-level project will be created.
+
+Alternatively, you can right-click on the link to an existing project to bring up a context menu, and select *New project*.
+
+h2(#sharing-projects). Sharing projects
+
+Projects can be shared with other users on the Arvados cluster. First, locate the collection or project using any available means (for instance, by manually navigating in the Workbench, or using the Search bar). Then right-click on its link in a listing, or click on the triple-dot button in the details page. You will find the menu item *Share*, which opens the dialog box *Sharing settings*.
+
+To share with other Arvados users, select the *WITH USERS/GROUPS* tab in the *Sharing settings* dialog box. Under *Add people and groups*, in the input field you can search for the user or group names. Select one you will be sharing with, choose the *Authorization* level (Read/Write/Manage) in the drop-down menu, and click on the plus sign (+) on the right. This can be repeated for other users or groups, each with their own *Authorization* level. The selected ones will appear under *People with access*. You can revisit the *Sharing settings* dialog box to modify the users or their access levels at a later time.
+
+The *General access* drop-down menu controls the default sharing setting, with the following choices:
+
+* *Private*: This is the initial state when no users or groups have been selected for sharing. At any time, by setting *General access* to private, the current sharing setting will be cleared, and any users or groups formerly with access will lose that access.
+* *Public*: This means the list of *People with access* will include _Anonymous users_, even if they are not users of the current cluster. You can further set their access level using the *Authorization* drop-down menu.
+* *All users*: This means sharing with other users who are logged in on the current cluster.
+* *Shared*: When you choose to share with specific people or groups, *General access* will be set to *Shared*. From this state, you can further specify the default sharing settings for *Public* and *All users*.
diff --git a/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid b/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid
index 8a08225723..3259f0d24d 100644
--- a/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid
+++ b/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid
@@ -21,18 +21,19 @@ h3. Steps
notextile.
-# Start from the *Workbench Dashboard*. You can access the Dashboard by clicking on * Dashboard* in the upper left corner of any Workbench page.
-# Click on the Run a process... button. This will open a dialog box titled *Choose a pipeline or workflow to run*.
-# In the search box, type in *bwa-mem.cwl*.
-# Select * bwa-mem.cwl* and click the Next: choose inputs button. This will create a new process in your *Home* project and will open it. You can now supply the inputs for the process. Please note that all required inputs are populated with default values and you can change them if you prefer.
-# For example, let's see how to set read pair *read_p1* and *read_p2* for this workflow. Click the Choose button beneath the *read_p1* header. This will open a dialog box titled *Choose a file*.
-# In the file dialog, click on *Home * menu and then select *All Projects*.
-# Enter *HWI-ST1027* into the search box. You will see one or more collections. Click on * HWI-ST1027_129_D0THKACXX for CWL tutorial*
-# The right hand panel will list two files. Click on the first one ending in "_1" and click the OK button.
-# Repeat the steps 5-8 to set the *read_p2* except selecting the second file ending in "_2"
-# Scroll to the bottom of the "Inputs" panel and click on the Run button. The page updates to show you that the process has been submitted to run on the Arvados cluster.
-# Once the process starts running, you can track the progress by watching log messages from the component(s). This page refreshes automatically. You will see a complete label when the process completes successfully.
-# Click on the *Output* link to see the results of the process. This will load a new page listing the output files from this process. You'll see the output SAM file from the alignment tool under the *Files* tab.
-# Click on the download button to the right of the SAM file to download your results.
+# Click on the + NEW button in the top-left.
+# In the pop-up menu, select * Run a workflow*. This will open the _Run Process_ panel in the Workbench.
+# In the search field under *Choose a workflow*, type in _bwa-mem.cwl_.
+# Select *bwa-mem.cwl* in the search results, and click the NEXT button. This will create a new process in one of your Home Projects and will open it. To specify the project for the workflow run, click on the input line below "*Project where the workflow will run*", and in the pop-up dialog box, choose a project under your Home Projects.
+# You can now supply the inputs for the process. Please note that all required inputs are populated with default values and you can change them if you prefer.
+# For example, let's see how to set read pair *read_p1* and *read_p2* for this workflow. Click on the input line under the *read_p1* header. This will open a dialog box titled *Choose a file*.
+# Enter the search terms _user guide resources_ into the *Search for a Project* field on the left. You will see one or more collections in the search results appearing below and, among them, the one with the exact title * User guide resources*. Your goal is to locate the file _HWI-ST1027_129_D0THKACXX.1_1.fastq_.
+# You may either locate the file manually, by clicking on the triangles (▶) to the left of each item to expand them (projects and the collections under it) until you find the file, or by filtering the search results using the *Filter Collections list in Projects* field, for example, with a term like "_HWI-ST1027_".
+# Either way, you will find the file *HWI-ST1027_129_D0THKACXX.1_1.fastq* in the search results. Click on it, and then the OK button in the bottom-right.
+# Repeat the steps 7--9 to set the value for *read_p2*, except selecting the file ending in "_2"
+# Scroll to the bottom of the "Inputs" panel and click on the RUN WORKFLOW button. The page updates to show you that the process has been queued to run on the Arvados cluster.
+# Once the process starts running, you can track the progress by watching the log messages from the component(s) (scroll down to the *Logs* panel). This page refreshes automatically, and you can also click on the REFRESH button on the top of the page. You will see a Completed label when the process completes successfully.
+# The output of the workflow can be found by following the link "Output from bwa-mem.cwl" under the heading *Output collection* in the main or DETAILS panel, or in the OUTPUTS panel further down. Click on the *Output from bwa-mem.cwl* link to see the detailed results from the workflow run. This will lead you to a page that lists the metadata of the outputs, and you'll see the output SAM file there, in the FILES panel.
+# To download your results, simply click on the SAM file name.
notextile.
diff --git a/doc/user/tutorials/wgs-tutorial.html.textile.liquid b/doc/user/tutorials/wgs-tutorial.html.textile.liquid
index 81ad97ed83..b64dc828bd 100644
--- a/doc/user/tutorials/wgs-tutorial.html.textile.liquid
+++ b/doc/user/tutorials/wgs-tutorial.html.textile.liquid
@@ -58,21 +58,24 @@ _Ways to Learn More About CWL_
h2. 3. Setting Up to Run the WGS Processing Workflow
-Letâs get a little familiar with the Arvados Workbench while also setting up to run the WGS processing tutorial workflow. Logging into the workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in your Arvados instance, i.e. the Arvados Playground. The Dashboard will only give you information about projects and activities that you have permissions to view and/or access. Other users' private or restricted projects and activities will not be visible by design.
+Let's get a little familiar with the Arvados Workbench while also setting up to run the WGS processing tutorial workflow. Logging into the workbench will present you with the front page. This gives a summary of your projects in your Arvados instance (i.e. the Arvados Playground) as well as a left hand side navigation bar, top search bar, and help, profile settings, and notifications on the top right. The front page will only give you information about projects and activities that you have permissions to view and/or access. Other users' private or restricted projects and activities will not be visible by design.
h3. 3a. Setting up a New Project
Projects in Arvados help you organize and track your work - and can contain data, workflow code, details about workflow runs, and results. Letâs begin by setting up a new project for the work you will be doing in this walkthrough.
-To create a new project, go to the Projects dropdown menu and select âAdd a New Projectâ.
+To create a new project, select the "+NEW" button in the upper left-hand corner, then select "New project".
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image4.png!
- _*Figure 3*: Adding a new project using Arvados Workbench._
+
_*Figure 3*: Adding a new project using Arvados Workbench, select the "+NEW" button in the upper left-hand corner and click "New project"._
-Letâs name your project âWGS Processing Tutorialâ. You can also add a description of your project using the *Edit* button. The universally unique identifier (UUID) of the project can be found in the URL.
+Let's name your project "WGS Processing Tutorial". You can also add a description of your project by typing in the **Description - optional** field. The universally unique identifier (UUID) of the project can be found in the URL, or by clicking the info button on the upper right-hand corner.
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image6.png!
- _*Figure 4*: Renaming new project using Arvados Workbench. The UUID of the project can be found in the URL and is highlighted in yellow in this image for emphasis._
+
_*Figure 4*: Renaming new project using Arvados Workbench, enter the name in the "Project Name" box._
+
+
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image7.png!
+ _*Figure 5*: The UUID of the project can be found by selecting the "i" in the upper right-hand corner, under "UUID" and copied using the copy to clipboard option, highlighted in yellow in this image for emphasis._
If you choose to use another name for your project, just keep in mind when the project name is referenced in the walkthrough later on.
@@ -80,18 +83,18 @@ h3. 3b. Working with Collections
Collections in Arvados help organize and manage your data. You can upload your existing data into a collection or reuse data from one or more existing collections. Collections allow us to reorganize our files without duplicating or physically moving the data, making them very efficient to use even when working with terabytes of data. Each collection has a universally unique identifier (collection UUID). This is a constant for this collection, even if we add or remove files -- or rename the collection. You use this if you want to identify the most recent version of your collection to use in your workflows.
-Arvados uses a content-addressable filesystem (i.e. Keep) where the addresses of files are derived from their contents. A major benefit of this is that Arvados can then verify that when a dataset is retrieved it is the dataset you requested and can track the exact datasets that were used for each of our previous calculations. This is what allows you to be certain that we are always working with the data that you think you are using. You use the content address of a collection when you want to guarantee that you use the same version as input to your workflow.
+Arvados uses a content-addressable filesystem (i.e. Keep) where the addresses of files are derived from their contents. A major benefit of this is that Arvados can then verify that when a dataset is retrieved it is the dataset you requested and can track the exact datasets that were used for each of our previous calculations. This is what allows you to be certain that we are always working with the data that you think you are using. You use the portable data hash of a collection when you want to guarantee that you use the same version as input to your workflow.
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image1.png!
- _*Figure 5*: A collection in Arvados as viewed via the Arvados Workbench. On the upper left you will find a panel that contains: the name of the collection (editable), a description of the collection (editable), the collection UUID and the content address and content size._
+
_*Figure 6*: A collection in Arvados as viewed via the Arvados Workbench. You will find a panel that contains: the name of the collection (this is editable, if you hit the three dots in the upper right-hand corner and click "Edit collection"), a description of the collection (also editable through the same way), the collection UUID, the portable data hash, content size, and some other information like version number._
Let's start working with collections by copying the existing collection that stores the FASTQ data being processed into our new "WGS Processing Tutorial" project.
-First, you must find the collection you are interested in copying over to your project. There are several ways to search for a collection: by collection name, by UUID or by content address. In this case, let's search for our collection by name.
+First, you must find the collection you are interested in copying over to your project. There are several ways to search for a collection: by collection name, by UUID or by portable data hash. In this case, let's search for our collection by name.
-In this case it is called "PGP UK FASTQs" and by searching for it in the "search this site" box. It will come up and you can navigate to it. You would do similarly if you would want to search by UUID or content address.
+In this case it is called "PGP UK FASTQs (ten genomes)", and you can find it by searching for it in the "Search" box. Once it comes up, you can navigate to it. You would do similarly if you want to search by UUID or portable data hash.
-Now that you have found the collection of FASTQs you want to copy to your project, you can simply use the
Copy to project... button and select your new project to copy the collection there. You can rename your collection whatever you wish, or use the default name on copy and add whatever description you would like.
+Now that you have found the collection of FASTQs you want to copy to your project, you can simply click the three dots in the right corner and click "Make a copy" and select your new project to copy the collection there. You can rename your collection whatever you wish, or use the default name on copy and add whatever description you would like.
@@ -105,17 +108,18 @@ In this section, we will be discussing three ways to run the tutorial workflow u
h3. 4a. Interactively Running a Workflow Using Workbench
-Workflows can be registered in Arvados. Registration allows you to share a workflow with other Arvados users, and lets them run the workflow by clicking the
 Run a process... button on the Workbench Dashboard and on the command line by specifying the workflow UUID. Default values can be specified for workflow inputs.
+Workflows can be registered in Arvados. Registration allows you to share a workflow with other Arvados users, and lets them run the workflow by clicking the "+NEW" button and selecting "Run a workflow" on the Workbench Dashboard or on the command line by specifying the workflow UUID. Default values can be specified for workflow inputs.
We have already previously registered the WGS workflow and set default input values for this set of the walkthrough.
Let's find the registered WGS Processing Workflow and run it interactively in our newly created project.
-# To find the registered workflow, you can search for it in the search box located in the top right corner of the Arvados Workbench by looking for the name "WGS Processing Workflow".
-# Once you have found the registered workflow, you can run it your project by using the
Run this workflow.. button and selecting your project ("WGS Processing Tutorial") that you set up in Section 3a.
-# Default inputs to the registered workflow will be automatically filled in. These inputs will still work. You can verify this by checking the addresses of the collections you copied over to your New Project.
-# The input *Directory of paired FASTQ files* will need to be set. Click on
Choose button, select "PGP UK FASTQs" in the *Choose a dataset* dialog and then click
OK .
-# Now, you can submit your workflow by scrolling to the bottom of the page and hitting the
Run button.
+# To find the registered workflow, in the left-hand navigation bar, select "Public Favorites". That listing will include the "WGS Processing Workflow" project. Open that project, and it will include the workflow "WGS processing workflow scattered over samples". Open that workflow.
+# Once you have found the registered workflow, you can run it in your project by using the "Run Workflow" button and selecting your project ("WGS Processing Tutorial") that you set up in Section 3a, under *Project where the workflow will run*.
+
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image8.png!
+ _*Figure 7*: This is the page that pops up when you hit "Run Workflow", the input that needs to be selected is highlighted in yellow._
+# Default inputs to the registered workflow will be automatically filled in. These inputs will still work. You can verify this by checking the addresses of the collections you copied over to your new project.
+# Now, you can submit your workflow by selecting the "Run Workflow" button.
Congratulations! You have now submitted your workflow to run. You can move to Section 5 to learn how to check the state of your submitted workflow and Section 6 to learn how to examine the results of and logs from your workflow.
@@ -171,7 +175,7 @@ The tutorial directories are as follows:
Before we run the WGS processing workflow, we want to adjust the inputs to match those in your new project. The workflow that we want to submit is described by the file @/cwl/@ and the inputs are given by the file @/yml/@. Note: while all the cwl files are needed to describe the full workflow only the single yml with the workflow inputs is needed to run the workflow. The additional yml files (in the helper folder) are provided for testing purposes or if one might want to test or run an underlying subworkflow or cwl for a command line tool by itself.
-Several of the inputs in the yml file point to original content addresses of collections that you make copies of in our New Project. These still work because even though we made copies of the collections into our new project we havenât changed the underlying contents. However, by changing this file is in general how you would alter the inputs in the accompanying yml file for a given workflow.
+Several of the inputs in the yml file point to the original portable data hashes of the collections that you made copies of in your new project. These still work because even though you made copies of the collections into your new project, you haven't changed the underlying contents. However, changing this file is in general how you would alter the inputs in the accompanying yml file for a given workflow.
The command to submit to the Arvados Playground Cluster is @arvados-cwl-runner@.
To submit the WGS processing workflow , you need to run the following command replacing YOUR_PROJECT_UUID with the UUID of the new project you created for this tutorial.
@@ -192,23 +196,20 @@ Now, you are ready to check the state of your submitted workflow.
h2. 5. Checking the State Of a Submitted Workflow
-Once you have submitted your workflow, you can examine its state interactively using the Arvados Workbench. If you arenât already viewing your workflow process on the workbench, there several ways to get to your submitted workflow. Here are two of the simplest ways:
-
-* Via the Dashboard: It should be listed at the top of the list of âRecent Processesâ. Just click on the name of your submitted workflow and it will take you to the submitted workflow information.
-* Via Your Project: You will want to go back to your new project, using the Projects pulldown menu or searching for the project name. Note: You can mark a Project as a favorite (if/when you have multiple Projects) to make it easier to find on the pulldown menu using the star next to the project name on the project page.
+Once you have submitted your workflow, you can examine its state interactively using the Arvados Workbench. If you aren't already viewing your workflow process on the workbench, you can navigate there via your project. You will want to go back to your new project, using the projects pulldown menu (the list of projects on the left) or searching for the project name. Note: You can mark a project as a favorite (if/when you have multiple projects) to make it easier to find on the pulldown menu by right-clicking on the project name on the project pulldown menu and selecting "Add to favorites".
-The process you will be looking for will be titled âWGS processing workflow scattered over samplesâ(if you submitted via the command line) or NAME OF REGISTERED WORKFLOW container (if you submitted via the Registered Workflow).
+The process you will be looking for will be titled "WGS processing workflow scattered over samples" (if you submitted via the command line/Workbench).
Once you have found your workflow, you can clearly see the state of the overall workflow and underlying steps below by their label.
Common states you will see are as follows:
-*
Queued - Workflow or step is waiting to run
-*
Running or
Active - Workflow is currently running
-*
Complete - Workflow or step has successfully completed
-*
Failing - Workflow is running but has steps that have failed
-*
Failed - Workflow or step did not complete successfully
-*
Cancelled - Workflow or step was either manually cancelled or was canceled by Arvados due to a system error
+* "Queued" - Workflow or step is waiting to run
+* "Running" or "Active"- Workflow is currently running
+* "Complete" - Workflow or step has successfully completed
+* "Failing"- Workflow is running but has steps that have failed
+* "Failed"- Workflow or step did not complete successfully
+* "Cancelled" - Workflow or step was either manually cancelled or was canceled by Arvados due to a system error
Since Arvados Crunch reuses steps and workflows if possible, this workflow should run relatively quickly since this workflow has been run before and you have access to those previously run steps. You may notice an initial period where the top level job shows the option of canceling while the other steps are filled in with already finished steps.
@@ -217,13 +218,13 @@ h2. 6. Examining a Finished Workflow
Once your workflow has finished, you can see how long it took the workflow to run, see scaling information, and examine the logs and outputs. Outputs will be only available for steps that have been successfully completed. Outputs will be saved for every step in the workflow and be saved for the workflow itself. Outputs are saved in collections. You can access each collection by clicking on the link corresponding to the output.
!{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image5.png!
- _*Figure 6*: A completed workflow process in Arvados as viewed via the Arvados Workbench. You can click on the outputs link (highlighted in yellow) to view the outputs. Outputs of a workflow are stored in a collection._
+
_*Figure 8*: A completed workflow process in Arvados as viewed via the Arvados Workbench. You can click on the outputs link (highlighted in yellow) to view the outputs. Outputs of a workflow are stored in a collection._
-If we click on the outputs of the workflow, we will see the output collection.
+If we click on the outputs of the workflow, we will see the output collection. It contains the GVCF, tabix index file, and HTML ClinVar report for each analyzed sample (e.g., set of FASTQs). You can open a report in the browser by selecting it from the listing. You can also download a file to your local machine by right-clicking a file and selecting "Download" from the context menu, or from the action menu available from the far right of each listing.
-Contained in this collection, is the GVCF, tabix index file, and html ClinVar report for each analyzed sample (e.g. set of FASTQs). By clicking on the download button to the right of the file, you can download it to your local machine. You can also use the command line to download single files or whole collections to your machine. You can examine the outputs of a step similarly by using the arrow to expand the panel to see more details.
+Logs for the main process can be found back on the workflow process page. Selecting the "LOGS" button at the top navigates down to the logs. You can view the logs directly through that panel, or in the upper right-hand corner select the button with hover-over text "Go to Log collection".
-Logs for the main process can be found in the Log tab. There several logs available, so here is a basic summary of what some of the more commonly used logs contain. Let's first define a few terms that will help us understand what the logs are tracking.
+There are several logs available, so here is a basic summary of what some of the more commonly used logs contain. Let's first define a few terms that will help us understand what the logs are tracking.
As you may recall, Arvados Crunch manages the running of workflows. A _container request_ is an order sent to Arvados Crunch to perform some computational work. Crunch fulfils a request by either choosing a worker node to execute a container, or finding an identical/equivalent container that has already run. You can use _container request_ or _container_ to distinguish between a work order that is submitted to be run and a work order that is actually running or has been run. So our container request in this case is just the submitted workflow we sent to the Arvados cluster.
@@ -233,10 +234,11 @@ A _node_ is a compute resource where Arvardos can schedule work. In our case si
** Captures everything written to standard error by the programs run by the executing container
* @node-info.txt@ and @node.json@
** Contains information about the nodes that executed this container. For the Arvados Playground, this gives information about the virtual machine instance that ran the container.
-node.json gives a high level overview about the instance such as name, price, and RAM while node-info.txt gives more detailed information about the virtual machine (e.g. cpu of each processor)
+node.json gives a high level overview about the instance such as name, price, and RAM while node-info.txt gives more detailed information about the virtual machine (e.g., CPU of each processor)
* @crunch-run.txt@ and @crunchstat.txt@
** @crunch-run.txt@ has info about how the container's execution environment was set up (e.g., time spent loading the docker image) and timing/results of copying output data to Keep (if applicable)
** @crunchstat.txt@ has info about resource consumption (RAM, cpu, disk, network) by the container while it was running.
+* @usage_report.html@ can be viewed directly in the browser by clicking on it. It provides a summary and chart of the resource consumption derived from the raw data in @crunchstat.txt@. (Available starting with @arvados-cwl-runner@ 2.7.2).
* @container.json@
** Describes the container (unit of work to be done), contains CWL code, runtime constraints (RAM, vcpus) amongst other details
* @arv-mount.txt@
@@ -268,9 +270,9 @@ Letâs take a peek at a few of these logs to get you more familiar with them.
You can see the output of all the work that arvados-cwl-runner does by managing the execution of the CWL workflow and all the underlying steps and subworkflows.
-Now, letâs explore the logs for a step in the workflow. Remember that those logs can be found by expanding the steps and clicking on the link to the log collection. Letâs look at the log for the step that does the alignment. That step is named bwamem-samtools-view. We can see there are 10 of them because we are aligning 10 genomes. Letâs look at *bwamem-samtools-view2.*
+Now, let's explore the logs for a subprocess in the workflow. Start by navigating back to the workflow process page. The logs can be found by selecting the appropriate subprocess under the "Subprocesses" tab, and getting the logs in the same way as mentioned above. Let's look at the log for the subprocess that does the alignment. That subprocess is named bwamem-samtools-view. We can see there are 10 of them because we are aligning 10 genomes. Let's look at *bwamem-samtools-view_2.*
-We click the arrow to open up the step, and then can click on the log collection to access the logs. You may notice there are two sets of seemingly identical logs. One listed under a directory named for a container and one up in the main directory. This is done in case your step had to be automatically re-run due to any issues and gives the logs of each re-run. The logs in the main directory are the logs for the successful run. In most cases this does not happen, you will just see one directory and one those logs will match the logs in the main directory. Letâs open the logs labeled node-info.txt and stderr.txt.
+We click on the subprocess to open it and then can go down to the "Logs" section to access the logs. You may notice there are two sets of seemingly identical logs. One listed under a directory named for a container and one up in the main directory. This is done in case your subprocess had to be automatically re-run due to any issues and gives the logs of each re-run. The logs in the main directory are the logs for the successful run. In most cases this does not happen; you will just see one directory, and those logs will match the logs in the main directory. Let's open the logs labeled node-info.txt and stderr.txt.
@node-info.txt@ gives us detailed information about the virtual machine this step was run on. The tail end of the log should look like the following:
@@ -346,6 +348,8 @@ The tail end of our log should be similar to the following:
This is the command we ran to invoke bwa-mem, and the scaling information for running bwa-mem multi-threaded across 16 cores (15.4x).
+You can also view outputs for the subprocess just like you do for the main workflow process. Back on the subprocess page for *bwamem-samtools-view_2*, the Outputs pane shows the output files of this specific subprocess. In this case, it is a single BAM file. This way, if your workflow succeeds but produces a surprising result, you can download and review the intermediate outputs to investigate further.
+
We hope that now that you have a bit more familiarity with the logs you can continue to use them to debug and optimize your own workflows as you move forward with using Arvados in your own work in the future.
h2. 7. Conclusion
diff --git a/docker/jobs/Dockerfile b/docker/jobs/Dockerfile
index 1b75e13420..05d8547c52 100644
--- a/docker/jobs/Dockerfile
+++ b/docker/jobs/Dockerfile
@@ -3,31 +3,16 @@
# SPDX-License-Identifier: Apache-2.0
# Based on Debian
-FROM debian:buster-slim
+FROM debian:bullseye-slim
MAINTAINER Arvados Package Maintainers
-ENV DEBIAN_FRONTEND noninteractive
-
-RUN apt-get update -q
-RUN apt-get install -yq --no-install-recommends gnupg
-
ARG repo_version
-RUN echo repo_version $repo_version
-ADD apt.arvados.org-$repo_version.list /etc/apt/sources.list.d/
-
-ADD 1078ECD7.key /tmp/
-RUN cat /tmp/1078ECD7.key | apt-key add -
-
-ARG python_sdk_version
ARG cwl_runner_version
-RUN echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+ADD apt.arvados.org-$repo_version.list /etc/apt/sources.list.d/
+ADD 1078ECD7.key /etc/apt/trusted.gpg.d/arvados.asc
RUN apt-get update -q
-RUN apt-get install -yq --no-install-recommends python3-arvados-cwl-runner=$cwl_runner_version
-
-# use the Python executable from the python-arvados-cwl-runner package
-RUN PYTHON=`ls /usr/share/python3*/dist/python3-arvados-cwl-runner/bin/python|head -n1` && rm -f /usr/bin/python && ln -s $PYTHON /usr/bin/python
-RUN PYTHON3=`ls /usr/share/python3*/dist/python3-arvados-cwl-runner/bin/python3|head -n1` && rm -f /usr/bin/python3 && ln -s $PYTHON3 /usr/bin/python3
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends python3-arvados-cwl-runner=$cwl_runner_version
# Install dependencies and set up system.
RUN /usr/sbin/adduser --disabled-password \
@@ -35,3 +20,4 @@ RUN /usr/sbin/adduser --disabled-password \
/usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
USER crunch
+ENV PATH=/usr/lib/python3-arvados-cwl-runner/bin:/usr/local/bin:/usr/bin:/bin
diff --git a/docker/jobs/apt.arvados.org-dev.list b/docker/jobs/apt.arvados.org-dev.list
index 210f5d5511..155244ba9f 100644
--- a/docker/jobs/apt.arvados.org-dev.list
+++ b/docker/jobs/apt.arvados.org-dev.list
@@ -1,2 +1,2 @@
# apt.arvados.org
-deb http://apt.arvados.org/buster buster-dev main
+deb http://apt.arvados.org/bullseye bullseye-dev main
diff --git a/docker/jobs/apt.arvados.org-stable.list b/docker/jobs/apt.arvados.org-stable.list
index 153e729805..5a4b8c91c8 100644
--- a/docker/jobs/apt.arvados.org-stable.list
+++ b/docker/jobs/apt.arvados.org-stable.list
@@ -1,2 +1,2 @@
# apt.arvados.org
-deb http://apt.arvados.org/buster buster main
+deb http://apt.arvados.org/bullseye bullseye main
diff --git a/docker/jobs/apt.arvados.org-testing.list b/docker/jobs/apt.arvados.org-testing.list
index d5f4581685..302862ca64 100644
--- a/docker/jobs/apt.arvados.org-testing.list
+++ b/docker/jobs/apt.arvados.org-testing.list
@@ -1,2 +1,2 @@
# apt.arvados.org
-deb http://apt.arvados.org/buster buster-testing main
+deb http://apt.arvados.org/bullseye bullseye-testing main
diff --git a/go.mod b/go.mod
index aced60dbc4..ccadc8ce9d 100644
--- a/go.mod
+++ b/go.mod
@@ -9,38 +9,40 @@ require (
github.com/Azure/go-autorest/autorest/azure/auth v0.5.9
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/arvados/cgofuse v1.2.0-arvados1
- github.com/aws/aws-sdk-go v1.25.30
+ github.com/aws/aws-sdk-go v1.44.174
github.com/aws/aws-sdk-go-v2 v0.23.0
github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092
- github.com/coreos/go-oidc v2.1.0+incompatible
+ github.com/coreos/go-oidc/v3 v3.5.0
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
- github.com/creack/pty v1.1.7
- github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible
+ github.com/creack/pty v1.1.18
+ github.com/docker/docker v24.0.9+incompatible
github.com/dustin/go-humanize v1.0.0
github.com/fsnotify/fsnotify v1.4.9
github.com/ghodss/yaml v1.0.0
github.com/go-ldap/ldap v3.0.3+incompatible
github.com/gogo/protobuf v1.3.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
- github.com/gorilla/mux v1.7.2
+ github.com/gorilla/mux v1.8.0
+ github.com/hashicorp/go-retryablehttp v0.7.2
github.com/hashicorp/golang-lru v0.5.1
+ github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87
github.com/imdario/mergo v0.3.12
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff
github.com/jmoiron/sqlx v1.2.0
github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc
- github.com/julienschmidt/httprouter v1.2.0
+ github.com/julienschmidt/httprouter v1.3.0
github.com/lib/pq v1.10.2
github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9
- github.com/prometheus/client_golang v1.7.1
- github.com/prometheus/client_model v0.2.0
- github.com/prometheus/common v0.10.0
+ github.com/prometheus/client_golang v1.14.0
+ github.com/prometheus/client_model v0.3.0
+ github.com/prometheus/common v0.39.0
github.com/sirupsen/logrus v1.8.1
- golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871
- golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
- golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
- golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e
- google.golang.org/api v0.20.0
- gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
+ golang.org/x/crypto v0.22.0
+ golang.org/x/net v0.24.0
+ golang.org/x/oauth2 v0.11.0
+ golang.org/x/sys v0.19.0
+ google.golang.org/api v0.126.0
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/src-d/go-billy.v4 v4.0.1
gopkg.in/src-d/go-git.v4 v4.0.0
@@ -48,7 +50,9 @@ require (
)
require (
- cloud.google.com/go v0.54.0 // indirect
+ cloud.google.com/go/compute v1.23.0 // indirect
+ cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.17 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 // indirect
@@ -56,54 +60,62 @@ require (
github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/Microsoft/go-winio v0.4.17 // indirect
+ github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.1 // indirect
- github.com/containerd/containerd v1.5.10 // indirect
+ github.com/bgentry/speakeasy v0.1.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
- github.com/docker/distribution v2.7.1+incompatible // indirect
+ github.com/dnaeon/go-vcr v1.2.0 // indirect
+ github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-connections v0.3.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/gliderlabs/ssh v0.2.2 // indirect
github.com/go-asn1-ber/asn1-ber v1.4.1 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
- github.com/golang/protobuf v1.5.0 // indirect
- github.com/googleapis/gax-go/v2 v2.0.5 // indirect
- github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/s2a-go v0.1.4 // indirect
+ github.com/google/uuid v1.3.1 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+ github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
- github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.1.0 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pelletier/go-buffruneio v0.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
- github.com/prometheus/procfs v0.6.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
- github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5 // indirect
+ github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 // indirect
github.com/src-d/gcfg v1.3.0 // indirect
github.com/xanzy/ssh-agent v0.1.0 // indirect
- go.opencensus.io v0.22.3 // indirect
- golang.org/x/text v0.3.6 // indirect
- golang.org/x/tools v0.1.7 // indirect
- google.golang.org/appengine v1.6.5 // indirect
- google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
- google.golang.org/grpc v1.33.2 // indirect
- google.golang.org/protobuf v1.27.1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
+ golang.org/x/tools v0.6.0 // indirect
+ google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
+ google.golang.org/grpc v1.59.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
+ gotest.tools/v3 v3.0.3 // indirect
)
replace github.com/AdRoll/goamz => github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef
diff --git a/go.sum b/go.sum
index 422a891e00..06c82b9e2d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,41 +1,19 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A=
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws=
github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
@@ -48,328 +26,114 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 h1:iuooz5cZL6VRcO7DVSFYxR
github.com/Azure/go-autorest/autorest/azure/cli v0.4.4/go.mod h1:yAQ2b6eP/CmLPnmLvxtT1ALIY3OR1oFcCqVBi8vHiTc=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/arvados/cgofuse v1.2.0-arvados1 h1:4Q4vRJ4hbTCcI4gGEaa6hqwj3rqlUuzeFQkfoEA2HqE=
github.com/arvados/cgofuse v1.2.0-arvados1/go.mod h1:79WFV98hrkRHK9XPhh2IGGOwpFSjocsWubgxAs2KhRc=
github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef h1:cl7DIRbiAYNqaVxg3CZY8qfZoBOKrj06H/x9SPGaxas=
github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef/go.mod h1:rCtgyMmBGEbjTm37fCuBYbNL0IhztiALzo3OB9HyiOM=
github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b h1:hK0t0aJTTXI64lpXln2A1SripqOym+GVNTnwsLes39Y=
github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.25.30 h1:I9qj6zW3mMfsg91e+GMSN/INcaX9tTFvr/l/BAHKaIY=
-github.com/aws/aws-sdk-go v1.25.30/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.44.174 h1:9lR4a6MKQW/t6YCG0ZKAt1GAkjdEPP8sWch/pfcuR0c=
+github.com/aws/aws-sdk-go v1.44.174/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.23.0 h1:+E1q1LLSfHSDn/DzOtdJOX+pLZE2HiNV2yO5AjZINwM=
github.com/aws/aws-sdk-go-v2 v0.23.0/go.mod h1:2LhT7UgHOXK3UXONKI5OMgIyoQL6zTAw/jwIeX6yqzw=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092 h1:0Di2onNnlN5PAyWPbqlPyN45eOQ+QW/J9eqLynt4IV4=
github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092/go.mod h1:8IzBjZCRSnsvM6MJMG8HNNtnzMl48H22rbJL2kRUJ0Y=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4=
-github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
+github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible h1:nhVo1udYfMj0Jsw0lnqrTjjf33aLpdgW9Wve9fHVzhQ=
-github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
+github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw=
github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
+github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=
github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -379,629 +143,285 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
+github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I=
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff h1:6NvhExg4omUC9NfA+l4Oq3ibNNeJUdiAF3iBVB0PlDk=
github.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff/go.mod h1:ddfPX8Z28YMjiqoaJhNBzWHapTHXejnB5cDCUWDwriw=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc h1:JJPhSHowepOF2+ElJVyb9jgt5ZyBkPMkPuhS0uODSFs=
github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc/go.mod h1:fNiSoOiEI5KlkWXn26OwKnNe58ilTIkpBlgOrt7Olu8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 h1:xXn0nBttYwok7DhU4RxqaADEpQn7fEMt5kKc3yoj/n0=
github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
-github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5 h1:Jw7W4WMfQDxsXvfeFSaS2cHlY7bAF4MGrgnbd0+Uo78=
-github.com/satori/go.uuid v1.2.1-0.20180103174451-36e9d2ebbde5/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76 h1:ofyVTM1w4iyKwaQIlRR6Ip06mXXx5Cnz7a4mTGYq1hE=
+github.com/satori/go.uuid v1.2.1-0.20180404165556-75cca531ea76/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0=
github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/xanzy/ssh-agent v0.1.0 h1:lOhdXLxtmYjaHc76ZtNmJWPg948y/RnT+3N3cvKWFzY=
github.com/xanzy/ssh-agent v0.1.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI=
-golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
+golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
+golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
-golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1010,31 +430,19 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.0.1 h1:iMxwQPj2cuKRyaIZ985zxClkcdTtT5VpXYf4PTJc0Ek=
@@ -1043,54 +451,14 @@ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOA
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.0.0 h1:9ZRNKHuhaTaJRGcGaH6Qg7uUORO2X0MNB5WL/CDdqto=
gopkg.in/src-d/go-git.v4 v4.0.0/go.mod h1:CzbUWqMn4pvmvndg3gnh5iZFmSsbhyhUWdI0IQ60AQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/getopt v0.0.0-20170811000552-20be20937449 h1:UukjJOsjQH0DIuyyrcod6CXHS6cdaMMuJmrt+SN1j4A=
rsc.io/getopt v0.0.0-20170811000552-20be20937449/go.mod h1:dhCdeqAxkyt5u3/sKRkUXuHaMXUu1Pt13GTQAM2xnig=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/lib/boot/cmd.go b/lib/boot/cmd.go
index 4b7284556e..3d653e97af 100644
--- a/lib/boot/cmd.go
+++ b/lib/boot/cmd.go
@@ -67,10 +67,9 @@ func (bcmd bootCommand) run(ctx context.Context, prog string, args []string, std
flags.StringVar(&super.ConfigPath, "config", "/etc/arvados/config.yml", "arvados config file `path`")
flags.StringVar(&super.SourcePath, "source", ".", "arvados source tree `directory`")
flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
- flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for internal services whose InternalURLs are not configured")
+ flags.StringVar(&super.ListenHost, "listen-host", "127.0.0.1", "host name or interface address for internal services whose InternalURLs are not configured")
flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
- flags.StringVar(&super.Workbench2Source, "workbench2-source", "../arvados-workbench2", "path to arvados-workbench2 source tree")
- flags.BoolVar(&super.NoWorkbench1, "no-workbench1", false, "do not run workbench1")
+ flags.BoolVar(&super.NoWorkbench1, "no-workbench1", true, "do not run workbench1")
flags.BoolVar(&super.NoWorkbench2, "no-workbench2", false, "do not run workbench2")
flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
diff --git a/lib/boot/helpers.go b/lib/boot/helpers.go
index 77036e9340..6a5514ada0 100644
--- a/lib/boot/helpers.go
+++ b/lib/boot/helpers.go
@@ -45,7 +45,7 @@ func (super *Supervisor) ClientsWithToken(clusterID, token string) (context.Cont
// communicating with the cluster on behalf of the 'example' user.
func (super *Supervisor) UserClients(clusterID string, rootctx context.Context, c *check.C, conn *rpc.Conn, authEmail string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {
login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
- ReturnTo: ",https://example.com",
+ ReturnTo: ",https://controller.api.client.invalid",
AuthInfo: rpc.UserSessionAuthInfo{
Email: authEmail,
FirstName: "Example",
diff --git a/lib/boot/nginx.go b/lib/boot/nginx.go
index b391c4dc8c..9f1091eac3 100644
--- a/lib/boot/nginx.go
+++ b/lib/boot/nginx.go
@@ -5,6 +5,7 @@
package boot
import (
+ "bytes"
"context"
"fmt"
"io/ioutil"
@@ -17,6 +18,7 @@ import (
"strings"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
)
// Run an Nginx process that proxies the supervisor's configured
@@ -46,6 +48,7 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
vars := map[string]string{
"LISTENHOST": extListenHost,
"UPSTREAMHOST": super.ListenHost,
+ "INTERNALSUBNETS": internalSubnets(super.logger),
"SSLCERT": filepath.Join(super.tempdir, "server.crt"),
"SSLKEY": filepath.Join(super.tempdir, "server.key"),
"ACCESSLOG": filepath.Join(super.tempdir, "nginx_access.log"),
@@ -150,3 +153,27 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
}
return waitForConnect(ctx, testurl.Host)
}
+
+// Return 0 or more local subnets as "geo" fragments for Nginx config,
+// e.g., "1.2.3.0/24 0; 10.1.0.0/16 0;".
+func internalSubnets(logger logrus.FieldLogger) string {
+ iproutes, err := exec.Command("ip", "route").CombinedOutput()
+ if err != nil {
+ logger.Warnf("treating all clients as external because `ip route` failed: %s (%q)", err, iproutes)
+ return ""
+ }
+ subnets := ""
+ for _, line := range bytes.Split(iproutes, []byte("\n")) {
+ fields := strings.Fields(string(line))
+ if len(fields) > 2 && fields[1] == "dev" {
+			// LAN example:
+			// 192.168.86.0/24 dev ens3 proto kernel scope link src 192.168.86.196
+			// GCP example (private subnet):
+			// 10.47.0.0/24 dev eth0 proto kernel scope link src 10.47.0.5
+			// GCP example (no private subnet):
+			// 10.128.0.1 dev ens4 scope link
+ subnets += fields[0] + " 0; "
+ }
+ }
+ return subnets
+}
diff --git a/lib/boot/passenger.go b/lib/boot/passenger.go
index 5367337e81..bf2ca2a78b 100644
--- a/lib/boot/passenger.go
+++ b/lib/boot/passenger.go
@@ -84,14 +84,9 @@ func (runner installPassenger) Run(ctx context.Context, fail func(error), super
if err != nil {
return err
}
- for _, version := range []string{"2.2.19"} {
- if !strings.Contains(buf.String(), "("+version+")") {
- err = super.RunProgram(ctx, appdir, runOptions{}, "gem", "install", "--user", "--conservative", "--no-document", "bundler:2.2.19")
- if err != nil {
- return err
- }
- break
- }
+ err = super.RunProgram(ctx, appdir, runOptions{}, "gem", "install", "--user", "--conservative", "--no-document", "--version", "~> 2.4.0", "bundler")
+ if err != nil {
+ return err
}
err = super.RunProgram(ctx, appdir, runOptions{}, "bundle", "config", "--set", "local", "path", filepath.Join(os.Getenv("HOME"), ".gem"))
if err != nil {
diff --git a/lib/boot/rails_db.go b/lib/boot/rails_db.go
new file mode 100644
index 0000000000..3464e52b9a
--- /dev/null
+++ b/lib/boot/rails_db.go
@@ -0,0 +1,135 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+ "context"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "git.arvados.org/arvados.git/lib/controller/dblock"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "github.com/sirupsen/logrus"
+)
+
+type railsDatabase struct{}
+
+func (runner railsDatabase) String() string {
+ return "railsDatabase"
+}
+
+// Run checks for and applies any pending Rails database migrations.
+//
+// If running a dev/test environment, and the database is empty, it
+// initializes the database.
+func (runner railsDatabase) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+ err := super.wait(ctx, runPostgreSQL{}, installPassenger{src: "services/api"})
+ if err != nil {
+ return err
+ }
+
+ // determine path to installed rails app or source tree
+ var appdir string
+ if super.ClusterType == "production" {
+ appdir = "/var/lib/arvados/railsapi"
+ } else {
+ appdir = filepath.Join(super.SourcePath, "services/api")
+ }
+
+ // Check for pending migrations before running rake.
+ //
+ // In principle, we could use "rake db:migrate:status" or skip
+ // this check entirely and let "rake db:migrate" be a no-op
+ // most of the time. However, in the most common case when
+ // there are no new migrations, that would add ~2s to startup
+ // time / downtime during service restart.
+
+ todo, err := migrationList(appdir, super.logger)
+ if err != nil {
+ return err
+ }
+
+ // read schema_migrations table (list of migrations already
+ // applied) and remove those entries from todo
+ dbconnector := ctrlctx.DBConnector{PostgreSQL: super.cluster.PostgreSQL}
+ defer dbconnector.Close()
+ db, err := dbconnector.GetDB(ctx)
+ if err != nil {
+ return err
+ }
+ rows, err := db.QueryContext(ctx, `SELECT version FROM schema_migrations`)
+ if err != nil {
+ if super.ClusterType == "production" {
+ return err
+ }
+ super.logger.WithError(err).Info("schema_migrations query failed, trying db:setup")
+ return super.RunProgram(ctx, "services/api", runOptions{env: railsEnv}, "bundle", "exec", "rake", "db:setup")
+ }
+ for rows.Next() {
+ var v string
+ err = rows.Scan(&v)
+ if err != nil {
+ return err
+ }
+ delete(todo, v)
+ }
+ err = rows.Close()
+ if err != nil {
+ return err
+ }
+
+ // if nothing remains in todo, all available migrations are
+ // done, so return without running any [relatively slow]
+ // ruby/rake commands
+ if len(todo) == 0 {
+ return nil
+ }
+
+ super.logger.Infof("%d migrations pending", len(todo))
+ if !dblock.RailsMigrations.Lock(ctx, dbconnector.GetDB) {
+ return context.Canceled
+ }
+ defer dblock.RailsMigrations.Unlock()
+ return super.RunProgram(ctx, appdir, runOptions{env: railsEnv}, "bundle", "exec", "rake", "db:migrate")
+}
+
+func migrationList(dir string, log logrus.FieldLogger) (map[string]bool, error) {
+ todo := map[string]bool{}
+
+ // list versions in db/migrate/{version}_{name}.rb
+ err := fs.WalkDir(os.DirFS(dir), "db/migrate", func(path string, d fs.DirEntry, err error) error {
+ if d.IsDir() {
+ return nil
+ }
+ fnm := d.Name()
+ if strings.HasSuffix(fnm, "~") {
+ return nil
+ }
+ if !strings.HasSuffix(fnm, ".rb") {
+ log.Warnf("unexpected file in db/migrate dir: %s", fnm)
+ return nil
+ }
+ for i, c := range fnm {
+ if i > 0 && c == '_' {
+ todo[fnm[:i]] = true
+ break
+ }
+ if c < '0' || c > '9' {
+ // non-numeric character before the
+ // first '_' means this is not a
+ // migration
+ log.Warnf("unexpected file in db/migrate dir: %s", fnm)
+ return nil
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return todo, nil
+}
diff --git a/lib/boot/rails_db_test.go b/lib/boot/rails_db_test.go
new file mode 100644
index 0000000000..5711189e72
--- /dev/null
+++ b/lib/boot/rails_db_test.go
@@ -0,0 +1,52 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+ "bytes"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "gopkg.in/check.v1"
+)
+
+type railsDBSuite struct{}
+
+var _ = check.Suite(&railsDBSuite{})
+
+// Check that services/api/db/migrate/*.rb match schema_migrations
+func (s *railsDBSuite) TestMigrationList(c *check.C) {
+ var logbuf bytes.Buffer
+ log := ctxlog.New(&logbuf, "text", "info")
+ todo, err := migrationList("../../services/api", log)
+ c.Check(err, check.IsNil)
+ c.Check(todo["20220804133317"], check.Equals, true)
+ c.Check(logbuf.String(), check.Equals, "")
+
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ cluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ db := arvadostest.DB(c, cluster)
+ rows, err := db.Query(`SELECT version FROM schema_migrations`)
+ for rows.Next() {
+ var v string
+ err = rows.Scan(&v)
+ c.Assert(err, check.IsNil)
+ if !todo[v] {
+ c.Errorf("version is in schema_migrations but not services/api/db/migrate/: %q", v)
+ }
+ delete(todo, v)
+ }
+ err = rows.Close()
+ c.Assert(err, check.IsNil)
+
+ // In the test suite, the database should be fully migrated.
+ // So, if there's anything left in todo here, there is
+ // something wrong with our "db/migrate/*.rb ==
+ // schema_migrations" reasoning.
+ c.Check(todo, check.HasLen, 0)
+}
diff --git a/lib/boot/seed.go b/lib/boot/seed.go
deleted file mode 100644
index b43d907201..0000000000
--- a/lib/boot/seed.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package boot
-
-import (
- "context"
-)
-
-// Populate a blank database with arvados tables and seed rows.
-type seedDatabase struct{}
-
-func (seedDatabase) String() string {
- return "seedDatabase"
-}
-
-func (seedDatabase) Run(ctx context.Context, fail func(error), super *Supervisor) error {
- err := super.wait(ctx, runPostgreSQL{}, installPassenger{src: "services/api"})
- if err != nil {
- return err
- }
- if super.ClusterType == "production" {
- return nil
- }
- err = super.RunProgram(ctx, "services/api", runOptions{env: railsEnv}, "bundle", "exec", "rake", "db:setup")
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/lib/boot/supervisor.go b/lib/boot/supervisor.go
index ddc17953d2..ac269b933a 100644
--- a/lib/boot/supervisor.go
+++ b/lib/boot/supervisor.go
@@ -61,8 +61,7 @@ type Supervisor struct {
// explicitly configured in config file. If blank, use a
// random port on ListenHost.
ControllerAddr string
- // Path to arvados-workbench2 source tree checkout.
- Workbench2Source string
+
NoWorkbench1 bool
NoWorkbench2 bool
OwnTemporaryDatabase bool
@@ -112,7 +111,7 @@ func (super *Supervisor) Start(ctx context.Context) {
super.ctx, super.cancel = context.WithCancel(ctx)
super.done = make(chan struct{})
- sigch := make(chan os.Signal)
+ sigch := make(chan os.Signal, 1)
signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
go func() {
defer signal.Stop(sigch)
@@ -205,15 +204,24 @@ func (super *Supervisor) Wait() error {
func (super *Supervisor) startFederation(cfg *arvados.Config) {
super.children = map[string]*Supervisor{}
for id, cc := range cfg.Clusters {
- super2 := *super
yaml, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})
if err != nil {
panic(fmt.Sprintf("json.Marshal partial config: %s", err))
}
- super2.ConfigYAML = string(yaml)
- super2.ConfigPath = "-"
- super2.children = nil
-
+ super2 := &Supervisor{
+ ConfigPath: "-",
+ ConfigYAML: string(yaml),
+ SourcePath: super.SourcePath,
+ SourceVersion: super.SourceVersion,
+ ClusterType: super.ClusterType,
+ ListenHost: super.ListenHost,
+ ControllerAddr: super.ControllerAddr,
+ NoWorkbench1: super.NoWorkbench1,
+ NoWorkbench2: super.NoWorkbench2,
+ OwnTemporaryDatabase: super.OwnTemporaryDatabase,
+ Stdin: super.Stdin,
+ Stderr: super.Stderr,
+ }
if super2.ClusterType == "test" {
super2.Stderr = &service.LogPrefixer{
Writer: super.Stderr,
@@ -221,7 +229,7 @@ func (super *Supervisor) startFederation(cfg *arvados.Config) {
}
}
super2.Start(super.ctx)
- super.children[id] = &super2
+ super.children[id] = super2
}
}
@@ -282,7 +290,7 @@ func (super *Supervisor) runCluster() error {
if err != nil {
return err
}
- conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0644)
+ conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
}
@@ -308,6 +316,7 @@ func (super *Supervisor) runCluster() error {
if super.ClusterType != "production" {
super.prependEnv("PATH", super.tempdir+"/bin:")
}
+ super.setEnv("ARVADOS_SERVER_ADDRESS", super.ListenHost)
// Now that we have the config, replace the bootstrap logger
// with a new one according to the logging config.
@@ -324,13 +333,13 @@ func (super *Supervisor) runCluster() error {
} else if super.SourceVersion == "" {
// Find current source tree version.
var buf bytes.Buffer
- err = super.RunProgram(super.ctx, ".", runOptions{output: &buf}, "git", "diff", "--shortstat")
+ err = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, "git", "diff", "--shortstat")
if err != nil {
return err
}
dirty := buf.Len() > 0
buf.Reset()
- err = super.RunProgram(super.ctx, ".", runOptions{output: &buf}, "git", "log", "-n1", "--format=%H")
+ err = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, "git", "log", "-n1", "--format=%H")
if err != nil {
return err
}
@@ -355,22 +364,23 @@ func (super *Supervisor) runCluster() error {
createCertificates{},
runPostgreSQL{},
runNginx{},
- runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{seedDatabase{}}},
+ railsDatabase{},
+ runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{railsDatabase{}}},
runServiceCommand{name: "git-httpd", svc: super.cluster.Services.GitHTTP},
runServiceCommand{name: "health", svc: super.cluster.Services.Health},
runServiceCommand{name: "keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
runServiceCommand{name: "keep-web", svc: super.cluster.Services.WebDAV},
- runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{seedDatabase{}}},
+ runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{railsDatabase{}}},
installPassenger{src: "services/api", varlibdir: "railsapi"},
- runPassenger{src: "services/api", varlibdir: "railsapi", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, seedDatabase{}, installPassenger{src: "services/api", varlibdir: "railsapi"}}},
- seedDatabase{},
+ runPassenger{src: "services/api", varlibdir: "railsapi", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{
+ createCertificates{},
+ installPassenger{src: "services/api", varlibdir: "railsapi"},
+ railsDatabase{},
+ }},
}
if !super.NoWorkbench1 {
- tasks = append(tasks,
- installPassenger{src: "apps/workbench", varlibdir: "workbench1", depends: []supervisedTask{seedDatabase{}}}, // dependency ensures workbench doesn't delay api install/startup
- runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench", varlibdir: "workbench1"}}},
- )
+ return errors.New("workbench1 is no longer supported")
}
if !super.NoWorkbench2 {
tasks = append(tasks,
@@ -844,7 +854,7 @@ func (super *Supervisor) autofillConfig() error {
if super.NoWorkbench1 && svc == &super.cluster.Services.Workbench1 ||
super.NoWorkbench2 && svc == &super.cluster.Services.Workbench2 ||
!super.cluster.Containers.CloudVMs.Enable && svc == &super.cluster.Services.DispatchCloud {
- // When workbench1 is disabled, it gets an
+ // When Workbench is disabled, it gets an
// ExternalURL (so we have a valid listening
// port to write in our Nginx config) but no
// InternalURLs (so health checker doesn't
diff --git a/lib/boot/workbench2.go b/lib/boot/workbench2.go
index 5a319ebfe4..8c8c607f45 100644
--- a/lib/boot/workbench2.go
+++ b/lib/boot/workbench2.go
@@ -37,25 +37,31 @@ func (runner runWorkbench2) Run(ctx context.Context, fail func(error), super *Su
err = super.RunProgram(ctx, "/var/lib/arvados/workbench2", runOptions{
user: "www-data",
}, "arvados-server", "workbench2", super.cluster.Services.Controller.ExternalURL.Host, net.JoinHostPort(host, port), ".")
- } else if super.Workbench2Source == "" {
- super.logger.Info("skipping Workbench2: Workbench2Source==\"\" and not in production mode")
- return
} else {
- stdinr, stdinw := io.Pipe()
- defer stdinw.Close()
- go func() {
- <-ctx.Done()
- stdinw.Close()
- }()
- if err = os.Mkdir(super.Workbench2Source+"/public/_health", 0777); err != nil && !errors.Is(err, fs.ErrExist) {
+ // super.SourcePath might be readonly, so for
+ // dev/test mode we make a copy in a writable
+ // dir.
+ livedir := super.wwwtempdir + "/workbench2"
+ if err := super.RunProgram(ctx, super.SourcePath+"/services/workbench2", runOptions{}, "rsync", "-a", "--delete-after", super.SourcePath+"/services/workbench2/", livedir); err != nil {
+ fail(err)
+ return
+ }
+ if err = os.Mkdir(livedir+"/public/_health", 0777); err != nil && !errors.Is(err, fs.ErrExist) {
fail(err)
return
}
- if err = ioutil.WriteFile(super.Workbench2Source+"/public/_health/ping", []byte(`{"health":"OK"}`), 0666); err != nil {
+ if err = ioutil.WriteFile(livedir+"/public/_health/ping", []byte(`{"health":"OK"}`), 0666); err != nil {
fail(err)
return
}
- err = super.RunProgram(ctx, super.Workbench2Source, runOptions{
+
+ stdinr, stdinw := io.Pipe()
+ defer stdinw.Close()
+ go func() {
+ <-ctx.Done()
+ stdinw.Close()
+ }()
+ err = super.RunProgram(ctx, livedir, runOptions{
env: []string{
"CI=true",
"HTTPS=false",
diff --git a/lib/cli/get.go b/lib/cli/get.go
index 9625214e22..352e7b9af6 100644
--- a/lib/cli/get.go
+++ b/lib/cli/get.go
@@ -30,12 +30,12 @@ func (getCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, st
flags.SetOutput(stderr)
err = flags.Parse(args)
if err != nil {
- return 2
+ return cmd.EXIT_INVALIDARGUMENT
}
if len(flags.Args()) != 1 {
fmt.Fprintf(stderr, "usage of %s:\n", prog)
flags.PrintDefaults()
- return 2
+ return cmd.EXIT_INVALIDARGUMENT
}
if opts.Short {
opts.Format = "uuid"
diff --git a/lib/cloud/azure/azure.go b/lib/cloud/azure/azure.go
index 1ff0798ea6..71f2a23dc9 100644
--- a/lib/cloud/azure/azure.go
+++ b/lib/cloud/azure/azure.go
@@ -28,6 +28,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/Azure/go-autorest/autorest/to"
"github.com/jmcvetta/randutil"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
@@ -238,7 +239,7 @@ type azureInstanceSet struct {
logger logrus.FieldLogger
}
-func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {
azcfg := azureInstanceSetConfig{}
err = json.Unmarshal(config, &azcfg)
if err != nil {
@@ -514,20 +515,23 @@ func (az *azureInstanceSet) Create(
AdminUsername: to.StringPtr(az.azconfig.AdminUsername),
LinuxConfiguration: &compute.LinuxConfiguration{
DisablePasswordAuthentication: to.BoolPtr(true),
- SSH: &compute.SSHConfiguration{
- PublicKeys: &[]compute.SSHPublicKey{
- {
- Path: to.StringPtr("/home/" + az.azconfig.AdminUsername + "/.ssh/authorized_keys"),
- KeyData: to.StringPtr(string(ssh.MarshalAuthorizedKey(publicKey))),
- },
- },
- },
},
CustomData: &customData,
},
},
}
+ if publicKey != nil {
+ vmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH = &compute.SSHConfiguration{
+ PublicKeys: &[]compute.SSHPublicKey{
+ {
+ Path: to.StringPtr("/home/" + az.azconfig.AdminUsername + "/.ssh/authorized_keys"),
+ KeyData: to.StringPtr(string(ssh.MarshalAuthorizedKey(publicKey))),
+ },
+ },
+ }
+ }
+
if instanceType.Preemptible {
// Setting maxPrice to -1 is the equivalent of paying spot price, up to the
// normal price. This means the node will not be pre-empted for price
@@ -785,6 +789,10 @@ func (ai *azureInstance) Address() string {
}
}
+func (ai *azureInstance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice {
+ return nil
+}
+
func (ai *azureInstance) RemoteUser() string {
return ai.provider.azconfig.AdminUsername
}
diff --git a/lib/cloud/azure/azure_test.go b/lib/cloud/azure/azure_test.go
index b6aa9a16b6..de8d655b19 100644
--- a/lib/cloud/azure/azure_test.go
+++ b/lib/cloud/azure/azure_test.go
@@ -69,14 +69,17 @@ var _ = check.Suite(&AzureInstanceSetSuite{})
const testNamePrefix = "compute-test123-"
-type VirtualMachinesClientStub struct{}
+type VirtualMachinesClientStub struct {
+ vmParameters compute.VirtualMachine
+}
-func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
+func (stub *VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
resourceGroupName string,
VMName string,
parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
parameters.ID = &VMName
parameters.Name = &VMName
+ stub.vmParameters = parameters
return parameters, nil
}
@@ -124,7 +127,7 @@ type testConfig struct {
var live = flag.String("live-azure-cfg", "", "Test with real azure API, provide config file")
-func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
+func GetInstanceSet() (*azureInstanceSet, cloud.ImageID, arvados.Cluster, error) {
cluster := arvados.Cluster{
InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
"tiny": {
@@ -153,8 +156,8 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
return nil, cloud.ImageID(""), cluster, err
}
- ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
- return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
+ ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger(), nil)
+ return ap.(*azureInstanceSet), cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
}
ap := azureInstanceSet{
azconfig: azureInstanceSetConfig{
@@ -193,18 +196,25 @@ func (*AzureInstanceSetSuite) TestCreate(c *check.C) {
tags := inst.Tags()
c.Check(tags["TestTagName"], check.Equals, "test tag value")
c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+ if *live == "" {
+ c.Check(ap.vmClient.(*VirtualMachinesClientStub).vmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH, check.NotNil)
+ }
instPreemptable, err := ap.Create(cluster.InstanceTypes["tinyp"],
img, map[string]string{
"TestTagName": "test tag value",
- }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+ }, "umask 0600; echo -n test-file-data >/var/run/test-file", nil)
c.Assert(err, check.IsNil)
tags = instPreemptable.Tags()
c.Check(tags["TestTagName"], check.Equals, "test tag value")
c.Logf("instPreemptable.String()=%v Address()=%v Tags()=%v", instPreemptable.String(), instPreemptable.Address(), tags)
-
+ if *live == "" {
+ // Should not have set SSH option, because publickey
+ // arg was nil
+ c.Check(ap.vmClient.(*VirtualMachinesClientStub).vmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH, check.IsNil)
+ }
}
func (*AzureInstanceSetSuite) TestListInstances(c *check.C) {
@@ -229,7 +239,7 @@ func (*AzureInstanceSetSuite) TestManageNics(c *check.C) {
c.Fatal("Error making provider", err)
}
- ap.(*azureInstanceSet).manageNics()
+ ap.manageNics()
ap.Stop()
}
@@ -239,7 +249,7 @@ func (*AzureInstanceSetSuite) TestManageBlobs(c *check.C) {
c.Fatal("Error making provider", err)
}
- ap.(*azureInstanceSet).manageBlobs()
+ ap.manageBlobs()
ap.Stop()
}
@@ -263,7 +273,7 @@ func (*AzureInstanceSetSuite) TestDeleteFake(c *check.C) {
c.Fatal("Error making provider", err)
}
- _, err = ap.(*azureInstanceSet).netClient.delete(context.Background(), "fakefakefake", "fakefakefake")
+ _, err = ap.netClient.delete(context.Background(), "fakefakefake", "fakefakefake")
de, ok := err.(autorest.DetailedError)
if ok {
diff --git a/lib/cloud/cloudtest/cmd.go b/lib/cloud/cloudtest/cmd.go
index 0ec79e1175..2dc13e5a51 100644
--- a/lib/cloud/cloudtest/cmd.go
+++ b/lib/cloud/cloudtest/cmd.go
@@ -18,7 +18,6 @@ import (
"git.arvados.org/arvados.git/lib/dispatchcloud"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "golang.org/x/crypto/ssh"
)
var Command command
@@ -65,9 +64,9 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
if err != nil {
return 1
}
- key, err := ssh.ParsePrivateKey([]byte(cluster.Containers.DispatchPrivateKey))
+ key, err := config.LoadSSHKey(cluster.Containers.DispatchPrivateKey)
if err != nil {
- err = fmt.Errorf("error parsing configured Containers.DispatchPrivateKey: %s", err)
+ err = fmt.Errorf("error loading Containers.DispatchPrivateKey: %s", err)
return 1
}
driver, ok := dispatchcloud.Drivers[cluster.Containers.CloudVMs.Driver]
@@ -86,22 +85,24 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
tagKeyPrefix := cluster.Containers.CloudVMs.TagKeyPrefix
tags[tagKeyPrefix+"CloudTestPID"] = fmt.Sprintf("%d", os.Getpid())
if !(&tester{
- Logger: logger,
- Tags: tags,
- TagKeyPrefix: tagKeyPrefix,
- SetID: cloud.InstanceSetID(*instanceSetID),
- DestroyExisting: *destroyExisting,
- ProbeInterval: cluster.Containers.CloudVMs.ProbeInterval.Duration(),
- SyncInterval: cluster.Containers.CloudVMs.SyncInterval.Duration(),
- TimeoutBooting: cluster.Containers.CloudVMs.TimeoutBooting.Duration(),
- Driver: driver,
- DriverParameters: cluster.Containers.CloudVMs.DriverParameters,
- ImageID: cloud.ImageID(*imageID),
- InstanceType: it,
- SSHKey: key,
- SSHPort: cluster.Containers.CloudVMs.SSHPort,
- BootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
- ShellCommand: *shellCommand,
+ Logger: logger,
+ Tags: tags,
+ TagKeyPrefix: tagKeyPrefix,
+ SetID: cloud.InstanceSetID(*instanceSetID),
+ DestroyExisting: *destroyExisting,
+ ProbeInterval: cluster.Containers.CloudVMs.ProbeInterval.Duration(),
+ SyncInterval: cluster.Containers.CloudVMs.SyncInterval.Duration(),
+ TimeoutBooting: cluster.Containers.CloudVMs.TimeoutBooting.Duration(),
+ Driver: driver,
+ DriverParameters: cluster.Containers.CloudVMs.DriverParameters,
+ ImageID: cloud.ImageID(*imageID),
+ InstanceType: it,
+ SSHKey: key,
+ SSHPort: cluster.Containers.CloudVMs.SSHPort,
+ DeployPublicKey: cluster.Containers.CloudVMs.DeployPublicKey,
+ BootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
+ InstanceInitCommand: cloud.InitCommand(cluster.Containers.CloudVMs.InstanceInitCommand),
+ ShellCommand: *shellCommand,
PauseBeforeDestroy: func() {
if *pauseBeforeDestroy {
logger.Info("waiting for operator to press Enter")
diff --git a/lib/cloud/cloudtest/tester.go b/lib/cloud/cloudtest/tester.go
index 9fd7c9e749..a335278ed6 100644
--- a/lib/cloud/cloudtest/tester.go
+++ b/lib/cloud/cloudtest/tester.go
@@ -27,23 +27,25 @@ var (
// configuration. Run() should be called only once, after assigning
// suitable values to public fields.
type tester struct {
- Logger logrus.FieldLogger
- Tags cloud.SharedResourceTags
- TagKeyPrefix string
- SetID cloud.InstanceSetID
- DestroyExisting bool
- ProbeInterval time.Duration
- SyncInterval time.Duration
- TimeoutBooting time.Duration
- Driver cloud.Driver
- DriverParameters json.RawMessage
- InstanceType arvados.InstanceType
- ImageID cloud.ImageID
- SSHKey ssh.Signer
- SSHPort string
- BootProbeCommand string
- ShellCommand string
- PauseBeforeDestroy func()
+ Logger logrus.FieldLogger
+ Tags cloud.SharedResourceTags
+ TagKeyPrefix string
+ SetID cloud.InstanceSetID
+ DestroyExisting bool
+ ProbeInterval time.Duration
+ SyncInterval time.Duration
+ TimeoutBooting time.Duration
+ Driver cloud.Driver
+ DriverParameters json.RawMessage
+ InstanceType arvados.InstanceType
+ ImageID cloud.ImageID
+ SSHKey ssh.Signer
+ SSHPort string
+ DeployPublicKey bool
+ BootProbeCommand string
+ InstanceInitCommand cloud.InitCommand
+ ShellCommand string
+ PauseBeforeDestroy func()
is cloud.InstanceSet
testInstance *worker.TagVerifier
@@ -54,16 +56,60 @@ type tester struct {
failed bool
}
+// Run the test suite once for each applicable permutation of
+// DriverParameters. Return true if everything worked.
+//
+// Currently this means run once for each configured SubnetID.
+func (t *tester) Run() bool {
+ var dp map[string]interface{}
+ if len(t.DriverParameters) > 0 {
+ err := json.Unmarshal(t.DriverParameters, &dp)
+ if err != nil {
+ t.Logger.WithError(err).Error("error decoding configured CloudVMs.DriverParameters")
+ return false
+ }
+ }
+ subnets, ok := dp["SubnetID"].([]interface{})
+ if !ok || len(subnets) <= 1 {
+ // Easy, only one SubnetID to test.
+ return t.runWithDriverParameters(t.DriverParameters)
+ }
+
+ deferredError := false
+ for i, subnet := range subnets {
+ subnet, ok := subnet.(string)
+ if !ok {
+ t.Logger.Errorf("CloudVMs.DriverParameters.SubnetID[%d] is invalid -- must be a string", i)
+ deferredError = true
+ continue
+ }
+ dp["SubnetID"] = subnet
+ t.Logger.Infof("running tests using SubnetID[%d] %q", i, subnet)
+ dpjson, err := json.Marshal(dp)
+ if err != nil {
+ t.Logger.WithError(err).Error("error encoding driver parameters")
+ deferredError = true
+ continue
+ }
+ ok = t.runWithDriverParameters(dpjson)
+ if !ok {
+ t.Logger.Infof("failed tests using SubnetID[%d] %q", i, subnet)
+ deferredError = true
+ }
+ }
+ return !deferredError
+}
+
// Run the test suite as specified, clean up as needed, and return
// true (everything is OK) or false (something went wrong).
-func (t *tester) Run() bool {
+func (t *tester) runWithDriverParameters(driverParameters json.RawMessage) bool {
// This flag gets set when we encounter a non-fatal error, so
// we can continue doing more tests but remember to return
// false (failure) at the end.
deferredError := false
var err error
- t.is, err = t.Driver.InstanceSet(t.DriverParameters, t.SetID, t.Tags, t.Logger)
+ t.is, err = t.Driver.InstanceSet(driverParameters, t.SetID, t.Tags, t.Logger, nil)
if err != nil {
t.Logger.WithError(err).Info("error initializing driver")
return false
@@ -127,7 +173,12 @@ func (t *tester) Run() bool {
defer t.destroyTestInstance()
bootDeadline := time.Now().Add(t.TimeoutBooting)
- initCommand := worker.TagVerifier{Instance: nil, Secret: t.secret, ReportVerified: nil}.InitCommand()
+ initCommand := worker.TagVerifier{Instance: nil, Secret: t.secret, ReportVerified: nil}.InitCommand() + "\n" + t.InstanceInitCommand
+
+ installPublicKey := t.SSHKey.PublicKey()
+ if !t.DeployPublicKey {
+ installPublicKey = nil
+ }
t.Logger.WithFields(logrus.Fields{
"InstanceType": t.InstanceType.Name,
@@ -135,9 +186,10 @@ func (t *tester) Run() bool {
"ImageID": t.ImageID,
"Tags": tags,
"InitCommand": initCommand,
+ "DeployPublicKey": installPublicKey != nil,
}).Info("creating instance")
t0 := time.Now()
- inst, err := t.is.Create(t.InstanceType, t.ImageID, tags, initCommand, t.SSHKey.PublicKey())
+ inst, err := t.is.Create(t.InstanceType, t.ImageID, tags, initCommand, installPublicKey)
lgrC := t.Logger.WithField("Duration", time.Since(t0))
if err != nil {
// Create() might have failed due to a bug or network
diff --git a/lib/cloud/ec2/ec2.go b/lib/cloud/ec2/ec2.go
index 52b73f781c..6251f18df0 100644
--- a/lib/cloud/ec2/ec2.go
+++ b/lib/cloud/ec2/ec2.go
@@ -13,6 +13,9 @@ import (
"encoding/json"
"fmt"
"math/big"
+ "regexp"
+ "strconv"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -27,6 +30,7 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
@@ -40,14 +44,49 @@ const (
)
type ec2InstanceSetConfig struct {
- AccessKeyID string
- SecretAccessKey string
- Region string
- SecurityGroupIDs arvados.StringSet
- SubnetID string
- AdminUsername string
- EBSVolumeType string
- IAMInstanceProfile string
+ AccessKeyID string
+ SecretAccessKey string
+ Region string
+ SecurityGroupIDs arvados.StringSet
+ SubnetID sliceOrSingleString
+ AdminUsername string
+ EBSVolumeType string
+ EBSPrice float64
+ IAMInstanceProfile string
+ SpotPriceUpdateInterval arvados.Duration
+}
+
+type sliceOrSingleString []string
+
+// UnmarshalJSON unmarshals an array of strings, and also accepts ""
+// as [], and "foo" as ["foo"].
+func (ss *sliceOrSingleString) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 {
+ *ss = nil
+ } else if data[0] == '[' {
+ var slice []string
+ err := json.Unmarshal(data, &slice)
+ if err != nil {
+ return err
+ }
+ if len(slice) == 0 {
+ *ss = nil
+ } else {
+ *ss = slice
+ }
+ } else {
+ var str string
+ err := json.Unmarshal(data, &str)
+ if err != nil {
+ return err
+ }
+ if str == "" {
+ *ss = nil
+ } else {
+ *ss = []string{str}
+ }
+ }
+ return nil
}
type ec2Interface interface {
@@ -55,12 +94,15 @@ type ec2Interface interface {
ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error)
DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
+ DescribeInstanceStatusPages(input *ec2.DescribeInstanceStatusInput, fn func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
+ DescribeSpotPriceHistoryPages(input *ec2.DescribeSpotPriceHistoryInput, fn func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
}
type ec2InstanceSet struct {
ec2config ec2InstanceSetConfig
+ currentSubnetIDIndex int32
instanceSetID cloud.InstanceSetID
logger logrus.FieldLogger
client ec2Interface
@@ -68,9 +110,16 @@ type ec2InstanceSet struct {
keys map[string]string
throttleDelayCreate atomic.Value
throttleDelayInstances atomic.Value
+
+ prices map[priceKey][]cloud.InstancePrice
+ pricesLock sync.Mutex
+ pricesUpdated map[priceKey]time.Time
+
+ mInstances *prometheus.GaugeVec
+ mInstanceStarts *prometheus.CounterVec
}
-func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {
instanceSet := &ec2InstanceSet{
instanceSetID: instanceSetID,
logger: logger,
@@ -97,6 +146,36 @@ func newEC2InstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID
if instanceSet.ec2config.EBSVolumeType == "" {
instanceSet.ec2config.EBSVolumeType = "gp2"
}
+
+ // Set up metrics
+ instanceSet.mInstances = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "ec2_instances",
+ Help: "Number of instances running",
+ }, []string{"subnet_id"})
+ instanceSet.mInstanceStarts = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "ec2_instance_starts_total",
+ Help: "Number of attempts to start a new instance",
+ }, []string{"subnet_id", "success"})
+ // Initialize all of the series we'll be reporting. Otherwise
+ // the {subnet=A, success=0} series doesn't appear in metrics
+ // at all until there's a failure in subnet A.
+ for _, subnet := range instanceSet.ec2config.SubnetID {
+ instanceSet.mInstanceStarts.WithLabelValues(subnet, "0").Add(0)
+ instanceSet.mInstanceStarts.WithLabelValues(subnet, "1").Add(0)
+ }
+ if len(instanceSet.ec2config.SubnetID) == 0 {
+ instanceSet.mInstanceStarts.WithLabelValues("", "0").Add(0)
+ instanceSet.mInstanceStarts.WithLabelValues("", "1").Add(0)
+ }
+ if reg != nil {
+ reg.MustRegister(instanceSet.mInstances)
+ reg.MustRegister(instanceSet.mInstanceStarts)
+ }
+
return instanceSet, nil
}
@@ -140,40 +219,6 @@ func (instanceSet *ec2InstanceSet) Create(
initCommand cloud.InitCommand,
publicKey ssh.PublicKey) (cloud.Instance, error) {
- md5keyFingerprint, sha1keyFingerprint, err := awsKeyFingerprint(publicKey)
- if err != nil {
- return nil, fmt.Errorf("Could not make key fingerprint: %v", err)
- }
- instanceSet.keysMtx.Lock()
- var keyname string
- var ok bool
- if keyname, ok = instanceSet.keys[md5keyFingerprint]; !ok {
- keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
- Filters: []*ec2.Filter{{
- Name: aws.String("fingerprint"),
- Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
- }},
- })
- if err != nil {
- return nil, fmt.Errorf("Could not search for keypair: %v", err)
- }
-
- if len(keyout.KeyPairs) > 0 {
- keyname = *(keyout.KeyPairs[0].KeyName)
- } else {
- keyname = "arvados-dispatch-keypair-" + md5keyFingerprint
- _, err := instanceSet.client.ImportKeyPair(&ec2.ImportKeyPairInput{
- KeyName: &keyname,
- PublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),
- })
- if err != nil {
- return nil, fmt.Errorf("Could not import keypair: %v", err)
- }
- }
- instanceSet.keys[md5keyFingerprint] = keyname
- }
- instanceSet.keysMtx.Unlock()
-
ec2tags := []*ec2.Tag{}
for k, v := range newTags {
ec2tags = append(ec2tags, &ec2.Tag{
@@ -192,7 +237,6 @@ func (instanceSet *ec2InstanceSet) Create(
InstanceType: &instanceType.ProviderType,
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
- KeyName: &keyname,
NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
{
@@ -200,7 +244,6 @@ func (instanceSet *ec2InstanceSet) Create(
DeleteOnTermination: aws.Bool(true),
DeviceIndex: aws.Int64(0),
Groups: aws.StringSlice(groups),
- SubnetId: &instanceSet.ec2config.SubnetID,
}},
DisableApiTermination: aws.Bool(false),
InstanceInitiatedShutdownBehavior: aws.String("terminate"),
@@ -209,9 +252,23 @@ func (instanceSet *ec2InstanceSet) Create(
ResourceType: aws.String("instance"),
Tags: ec2tags,
}},
+ MetadataOptions: &ec2.InstanceMetadataOptionsRequest{
+ // Require IMDSv2, as described at
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html
+ HttpEndpoint: aws.String(ec2.InstanceMetadataEndpointStateEnabled),
+ HttpTokens: aws.String(ec2.HttpTokensStateRequired),
+ },
UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
}
+ if publicKey != nil {
+ keyname, err := instanceSet.getKeyName(publicKey)
+ if err != nil {
+ return nil, err
+ }
+ rii.KeyName = &keyname
+ }
+
if instanceType.AddedScratch > 0 {
rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{{
DeviceName: aws.String("/dev/xvdt"),
@@ -237,18 +294,89 @@ func (instanceSet *ec2InstanceSet) Create(
}
}
- rsv, err := instanceSet.client.RunInstances(&rii)
- err = wrapError(err, &instanceSet.throttleDelayCreate)
- if err != nil {
- return nil, err
+ var rsv *ec2.Reservation
+ var errToReturn error
+ subnets := instanceSet.ec2config.SubnetID
+ currentSubnetIDIndex := int(atomic.LoadInt32(&instanceSet.currentSubnetIDIndex))
+ for tryOffset := 0; ; tryOffset++ {
+ tryIndex := 0
+ trySubnet := ""
+ if len(subnets) > 0 {
+ tryIndex = (currentSubnetIDIndex + tryOffset) % len(subnets)
+ trySubnet = subnets[tryIndex]
+ rii.NetworkInterfaces[0].SubnetId = aws.String(trySubnet)
+ }
+ var err error
+ rsv, err = instanceSet.client.RunInstances(&rii)
+ instanceSet.mInstanceStarts.WithLabelValues(trySubnet, boolLabelValue[err == nil]).Add(1)
+ if !isErrorCapacity(errToReturn) || isErrorCapacity(err) {
+ // We want to return the last capacity error,
+ // if any; otherwise the last non-capacity
+ // error.
+ errToReturn = err
+ }
+ if isErrorSubnetSpecific(err) &&
+ tryOffset < len(subnets)-1 {
+ instanceSet.logger.WithError(err).WithField("SubnetID", subnets[tryIndex]).
+ Warn("RunInstances failed, trying next subnet")
+ continue
+ }
+ // Succeeded, or exhausted all subnets, or got a
+ // non-subnet-related error.
+ //
+ // We intentionally update currentSubnetIDIndex even
+ // in the non-retryable-failure case here to avoid a
+ // situation where successive calls to Create() keep
+ // returning errors for the same subnet (perhaps
+ // "subnet full") and never reveal the errors for the
+ // other configured subnets (perhaps "subnet ID
+ // invalid").
+ atomic.StoreInt32(&instanceSet.currentSubnetIDIndex, int32(tryIndex))
+ break
+ }
+ if rsv == nil || len(rsv.Instances) == 0 {
+ return nil, wrapError(errToReturn, &instanceSet.throttleDelayCreate)
}
-
return &ec2Instance{
provider: instanceSet,
instance: rsv.Instances[0],
}, nil
}
+func (instanceSet *ec2InstanceSet) getKeyName(publicKey ssh.PublicKey) (string, error) {
+ instanceSet.keysMtx.Lock()
+ defer instanceSet.keysMtx.Unlock()
+ md5keyFingerprint, sha1keyFingerprint, err := awsKeyFingerprint(publicKey)
+ if err != nil {
+ return "", fmt.Errorf("Could not make key fingerprint: %v", err)
+ }
+ if keyname, ok := instanceSet.keys[md5keyFingerprint]; ok {
+ return keyname, nil
+ }
+ keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
+ Filters: []*ec2.Filter{{
+ Name: aws.String("fingerprint"),
+ Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
+ }},
+ })
+ if err != nil {
+ return "", fmt.Errorf("Could not search for keypair: %v", err)
+ }
+ if len(keyout.KeyPairs) > 0 {
+ return *(keyout.KeyPairs[0].KeyName), nil
+ }
+ keyname := "arvados-dispatch-keypair-" + md5keyFingerprint
+ _, err = instanceSet.client.ImportKeyPair(&ec2.ImportKeyPairInput{
+ KeyName: &keyname,
+ PublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),
+ })
+ if err != nil {
+ return "", fmt.Errorf("Could not import keypair: %v", err)
+ }
+ instanceSet.keys[md5keyFingerprint] = keyname
+ return keyname, nil
+}
+
func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances []cloud.Instance, err error) {
var filters []*ec2.Filter
for k, v := range tags {
@@ -257,6 +385,7 @@ func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances
Values: []*string{aws.String(v)},
})
}
+ needAZs := false
dii := &ec2.DescribeInstancesInput{Filters: filters}
for {
dio, err := instanceSet.client.DescribeInstances(dii)
@@ -268,23 +397,168 @@ func (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances
for _, rsv := range dio.Reservations {
for _, inst := range rsv.Instances {
if *inst.State.Name != "shutting-down" && *inst.State.Name != "terminated" {
- instances = append(instances, &ec2Instance{instanceSet, inst})
+ instances = append(instances, &ec2Instance{
+ provider: instanceSet,
+ instance: inst,
+ })
+ if aws.StringValue(inst.InstanceLifecycle) == "spot" {
+ needAZs = true
+ }
}
}
}
if dio.NextToken == nil {
- return instances, err
+ break
}
dii.NextToken = dio.NextToken
}
+ if needAZs && instanceSet.ec2config.SpotPriceUpdateInterval > 0 {
+ az := map[string]string{}
+ err := instanceSet.client.DescribeInstanceStatusPages(&ec2.DescribeInstanceStatusInput{
+ IncludeAllInstances: aws.Bool(true),
+ }, func(page *ec2.DescribeInstanceStatusOutput, lastPage bool) bool {
+ for _, ent := range page.InstanceStatuses {
+ az[*ent.InstanceId] = *ent.AvailabilityZone
+ }
+ return true
+ })
+ if err != nil {
+ instanceSet.logger.Warnf("error getting instance statuses: %s", err)
+ }
+ for _, inst := range instances {
+ inst := inst.(*ec2Instance)
+ inst.availabilityZone = az[*inst.instance.InstanceId]
+ }
+ instanceSet.updateSpotPrices(instances)
+ }
+
+ // Count instances in each subnet, and report in metrics.
+ subnetInstances := map[string]int{"": 0}
+ for _, subnet := range instanceSet.ec2config.SubnetID {
+ subnetInstances[subnet] = 0
+ }
+ for _, inst := range instances {
+ subnet := inst.(*ec2Instance).instance.SubnetId
+ if subnet != nil {
+ subnetInstances[*subnet]++
+ } else {
+ subnetInstances[""]++
+ }
+ }
+ for subnet, count := range subnetInstances {
+ instanceSet.mInstances.WithLabelValues(subnet).Set(float64(count))
+ }
+
+ return instances, err
+}
+
+type priceKey struct {
+ instanceType string
+ spot bool
+ availabilityZone string
+}
+
+// Refresh recent spot instance pricing data for the given instances,
+// unless we already have recent pricing data for all relevant types.
+func (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance) {
+ if len(instances) == 0 {
+ return
+ }
+
+ instanceSet.pricesLock.Lock()
+ defer instanceSet.pricesLock.Unlock()
+ if instanceSet.prices == nil {
+ instanceSet.prices = map[priceKey][]cloud.InstancePrice{}
+ instanceSet.pricesUpdated = map[priceKey]time.Time{}
+ }
+
+ updateTime := time.Now()
+ staleTime := updateTime.Add(-instanceSet.ec2config.SpotPriceUpdateInterval.Duration())
+ needUpdate := false
+ allTypes := map[string]bool{}
+
+ for _, inst := range instances {
+ ec2inst := inst.(*ec2Instance).instance
+ if aws.StringValue(ec2inst.InstanceLifecycle) == "spot" {
+ pk := priceKey{
+ instanceType: *ec2inst.InstanceType,
+ spot: true,
+ availabilityZone: inst.(*ec2Instance).availabilityZone,
+ }
+ if instanceSet.pricesUpdated[pk].Before(staleTime) {
+ needUpdate = true
+ }
+ allTypes[*ec2inst.InstanceType] = true
+ }
+ }
+ if !needUpdate {
+ return
+ }
+ var typeFilterValues []*string
+ for instanceType := range allTypes {
+ typeFilterValues = append(typeFilterValues, aws.String(instanceType))
+ }
+ // Get 3x update interval worth of pricing data. (Ideally the
+ // AWS API would tell us "we have shown you all of the price
+ // changes up to time T", but it doesn't, so we'll just ask
+ // for 3 intervals worth of data on each update, de-duplicate
+ // the data points, and not worry too much about occasionally
+ // missing some data points when our lookups fail twice in a
+	// row.)
+ dsphi := &ec2.DescribeSpotPriceHistoryInput{
+ StartTime: aws.Time(updateTime.Add(-3 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())),
+ Filters: []*ec2.Filter{
+ &ec2.Filter{Name: aws.String("instance-type"), Values: typeFilterValues},
+ &ec2.Filter{Name: aws.String("product-description"), Values: []*string{aws.String("Linux/UNIX")}},
+ },
+ }
+ err := instanceSet.client.DescribeSpotPriceHistoryPages(dsphi, func(page *ec2.DescribeSpotPriceHistoryOutput, lastPage bool) bool {
+ for _, ent := range page.SpotPriceHistory {
+ if ent.InstanceType == nil || ent.SpotPrice == nil || ent.Timestamp == nil {
+ // bogus record?
+ continue
+ }
+ price, err := strconv.ParseFloat(*ent.SpotPrice, 64)
+ if err != nil {
+ // bogus record?
+ continue
+ }
+ pk := priceKey{
+ instanceType: *ent.InstanceType,
+ spot: true,
+ availabilityZone: *ent.AvailabilityZone,
+ }
+ instanceSet.prices[pk] = append(instanceSet.prices[pk], cloud.InstancePrice{
+ StartTime: *ent.Timestamp,
+ Price: price,
+ })
+ instanceSet.pricesUpdated[pk] = updateTime
+ }
+ return true
+ })
+ if err != nil {
+ instanceSet.logger.Warnf("error retrieving spot instance prices: %s", err)
+ }
+
+ expiredTime := updateTime.Add(-64 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())
+ for pk, last := range instanceSet.pricesUpdated {
+ if last.Before(expiredTime) {
+ delete(instanceSet.pricesUpdated, pk)
+ delete(instanceSet.prices, pk)
+ }
+ }
+ for pk, prices := range instanceSet.prices {
+ instanceSet.prices[pk] = cloud.NormalizePriceHistory(prices)
+ }
}
func (instanceSet *ec2InstanceSet) Stop() {
}
type ec2Instance struct {
- provider *ec2InstanceSet
- instance *ec2.Instance
+ provider *ec2InstanceSet
+ instance *ec2.Instance
+ availabilityZone string // sometimes available for spot instances
}
func (inst *ec2Instance) ID() cloud.InstanceID {
@@ -348,6 +622,53 @@ func (inst *ec2Instance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
return cloud.ErrNotImplemented
}
+// PriceHistory returns the price history for this specific instance.
+//
+// AWS documentation is elusive about whether the hourly cost of a
+// given spot instance changes as the current spot price changes for
+// the corresponding instance type and availability zone. Our
+// implementation assumes the answer is yes, based on the following
+// hints.
+//
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html
+// says: "After your Spot Instance is running, if the Spot price rises
+// above your maximum price, Amazon EC2 interrupts your Spot
+// Instance." (This doesn't address what happens when the spot price
+// rises *without* exceeding your maximum price.)
+//
+// https://docs.aws.amazon.com/whitepapers/latest/cost-optimization-leveraging-ec2-spot-instances/how-spot-instances-work.html
+// says: "You pay the Spot price that's in effect, billed to the
+// nearest second." (But it's not explicitly stated whether "the price
+// in effect" changes over time for a given instance.)
+//
+// The same page also says, in a discussion about the effect of
+// specifying a maximum price: "Note that you never pay more than the
+// Spot price that is in effect when your Spot Instance is running."
+// (The use of the phrase "is running", as opposed to "was launched",
+// hints that pricing is dynamic.)
+func (inst *ec2Instance) PriceHistory(instType arvados.InstanceType) []cloud.InstancePrice {
+ inst.provider.pricesLock.Lock()
+ defer inst.provider.pricesLock.Unlock()
+ // Note updateSpotPrices currently populates
+ // inst.provider.prices only for spot instances, so if
+ // spot==false here, we will return no data.
+ pk := priceKey{
+ instanceType: *inst.instance.InstanceType,
+ spot: aws.StringValue(inst.instance.InstanceLifecycle) == "spot",
+ availabilityZone: inst.availabilityZone,
+ }
+ var prices []cloud.InstancePrice
+ for _, price := range inst.provider.prices[pk] {
+ // ceil(added scratch space in GiB)
+ gib := (instType.AddedScratch + 1<<30 - 1) >> 30
+ monthly := inst.provider.ec2config.EBSPrice * float64(gib)
+ hourly := monthly / 30 / 24
+ price.Price += hourly
+ prices = append(prices, price)
+ }
+ return prices
+}
+
type rateLimitError struct {
error
earliestRetry time.Time
@@ -357,23 +678,77 @@ func (err rateLimitError) EarliestRetry() time.Time {
return err.earliestRetry
}
-var isCodeCapacity = map[string]bool{
- "InsufficientInstanceCapacity": true,
- "VcpuLimitExceeded": true,
- "MaxSpotInstanceCountExceeded": true,
+type capacityError struct {
+ error
+ isInstanceTypeSpecific bool
+}
+
+func (er *capacityError) IsCapacityError() bool {
+ return true
}
-// isErrorCapacity returns whether the error is to be throttled based on its code.
+func (er *capacityError) IsInstanceTypeSpecific() bool {
+ return er.isInstanceTypeSpecific
+}
+
+var isCodeQuota = map[string]bool{
+ "InstanceLimitExceeded": true,
+ "InsufficientAddressCapacity": true,
+ "InsufficientFreeAddressesInSubnet": true,
+ "InsufficientVolumeCapacity": true,
+ "MaxSpotInstanceCountExceeded": true,
+ "VcpuLimitExceeded": true,
+}
+
+// isErrorQuota returns whether the error indicates we have reached
+// some usage quota/limit -- i.e., immediately retrying with an equal
+// or larger instance type will probably not work.
+//
// Returns false if error is nil.
-func isErrorCapacity(err error) bool {
+func isErrorQuota(err error) bool {
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
- if _, ok := isCodeCapacity[aerr.Code()]; ok {
+ if _, ok := isCodeQuota[aerr.Code()]; ok {
return true
}
}
return false
}
+var reSubnetSpecificInvalidParameterMessage = regexp.MustCompile(`(?ms).*( subnet |sufficient free [Ii]pv[46] addresses).*`)
+
+// isErrorSubnetSpecific returns true if the problem encountered by
+// RunInstances might be avoided by trying a different subnet.
+func isErrorSubnetSpecific(err error) bool {
+ aerr, ok := err.(awserr.Error)
+ if !ok {
+ return false
+ }
+ code := aerr.Code()
+ return strings.Contains(code, "Subnet") ||
+ code == "InsufficientInstanceCapacity" ||
+ code == "InsufficientVolumeCapacity" ||
+ code == "Unsupported" ||
+ // See TestIsErrorSubnetSpecific for examples of why
+ // we look for substrings in code/message instead of
+ // only using specific codes here.
+ (strings.Contains(code, "InvalidParameter") &&
+ reSubnetSpecificInvalidParameterMessage.MatchString(aerr.Message()))
+}
+
+// isErrorCapacity returns true if the error indicates lack of
+// capacity (either temporary or permanent) to run a specific instance
+// type -- i.e., retrying with a different instance type might
+// succeed.
+func isErrorCapacity(err error) bool {
+ aerr, ok := err.(awserr.Error)
+ if !ok {
+ return false
+ }
+ code := aerr.Code()
+ return code == "InsufficientInstanceCapacity" ||
+ (code == "Unsupported" && strings.Contains(aerr.Message(), "requested instance type"))
+}
+
type ec2QuotaError struct {
error
}
@@ -395,8 +770,10 @@ func wrapError(err error, throttleValue *atomic.Value) error {
}
throttleValue.Store(d)
return rateLimitError{error: err, earliestRetry: time.Now().Add(d)}
- } else if isErrorCapacity(err) {
+ } else if isErrorQuota(err) {
return &ec2QuotaError{err}
+ } else if isErrorCapacity(err) {
+ return &capacityError{err, true}
} else if err != nil {
throttleValue.Store(time.Duration(0))
return err
@@ -404,3 +781,5 @@ func wrapError(err error, throttleValue *atomic.Value) error {
throttleValue.Store(time.Duration(0))
return nil
}
+
+var boolLabelValue = map[bool]string{false: "0", true: "1"}
diff --git a/lib/cloud/ec2/ec2_test.go b/lib/cloud/ec2/ec2_test.go
index 3cd238ded5..5e6cf2c82b 100644
--- a/lib/cloud/ec2/ec2_test.go
+++ b/lib/cloud/ec2/ec2_test.go
@@ -9,7 +9,7 @@
//
// Tests should be run individually and in the order they are listed in the file:
//
-// Example azconfig.yml:
+// Example ec2config.yml:
//
// ImageIDForTestSuite: ami-xxxxxxxxxxxxxxxxx
// DriverParameters:
@@ -24,17 +24,24 @@ package ec2
import (
"encoding/json"
+ "errors"
"flag"
+ "fmt"
"sync/atomic"
"testing"
+ "time"
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/config"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/ghodss/yaml"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
@@ -46,6 +53,34 @@ func Test(t *testing.T) {
check.TestingT(t)
}
+type sliceOrStringSuite struct{}
+
+var _ = check.Suite(&sliceOrStringSuite{})
+
+func (s *sliceOrStringSuite) TestUnmarshal(c *check.C) {
+ var conf ec2InstanceSetConfig
+ for _, trial := range []struct {
+ input string
+ output sliceOrSingleString
+ }{
+ {``, nil},
+ {`""`, nil},
+ {`[]`, nil},
+ {`"foo"`, sliceOrSingleString{"foo"}},
+ {`["foo"]`, sliceOrSingleString{"foo"}},
+ {`[foo]`, sliceOrSingleString{"foo"}},
+ {`["foo", "bar"]`, sliceOrSingleString{"foo", "bar"}},
+ {`[foo-bar, baz]`, sliceOrSingleString{"foo-bar", "baz"}},
+ } {
+ c.Logf("trial: %+v", trial)
+ err := yaml.Unmarshal([]byte("SubnetID: "+trial.input+"\n"), &conf)
+ if !c.Check(err, check.IsNil) {
+ continue
+ }
+ c.Check(conf.SubnetID, check.DeepEquals, trial.output)
+ }
+}
+
type EC2InstanceSetSuite struct{}
var _ = check.Suite(&EC2InstanceSetSuite{})
@@ -56,25 +91,104 @@ type testConfig struct {
}
type ec2stub struct {
+ c *check.C
+ reftime time.Time
+ importKeyPairCalls []*ec2.ImportKeyPairInput
+ describeKeyPairsCalls []*ec2.DescribeKeyPairsInput
+ runInstancesCalls []*ec2.RunInstancesInput
+ // {subnetID => error}: RunInstances returns error if subnetID
+ // matches.
+ subnetErrorOnRunInstances map[string]error
}
func (e *ec2stub) ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) {
+ e.importKeyPairCalls = append(e.importKeyPairCalls, input)
return nil, nil
}
func (e *ec2stub) DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) {
+ e.describeKeyPairsCalls = append(e.describeKeyPairsCalls, input)
return &ec2.DescribeKeyPairsOutput{}, nil
}
func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+ e.runInstancesCalls = append(e.runInstancesCalls, input)
+ if len(input.NetworkInterfaces) > 0 && input.NetworkInterfaces[0].SubnetId != nil {
+ err := e.subnetErrorOnRunInstances[*input.NetworkInterfaces[0].SubnetId]
+ if err != nil {
+ return nil, err
+ }
+ }
return &ec2.Reservation{Instances: []*ec2.Instance{{
- InstanceId: aws.String("i-123"),
- Tags: input.TagSpecifications[0].Tags,
+ InstanceId: aws.String("i-123"),
+ InstanceType: aws.String("t2.micro"),
+ Tags: input.TagSpecifications[0].Tags,
}}}, nil
}
func (e *ec2stub) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
- return &ec2.DescribeInstancesOutput{}, nil
+ return &ec2.DescribeInstancesOutput{
+ Reservations: []*ec2.Reservation{{
+ Instances: []*ec2.Instance{{
+ InstanceId: aws.String("i-123"),
+ InstanceLifecycle: aws.String("spot"),
+ InstanceType: aws.String("t2.micro"),
+ PrivateIpAddress: aws.String("10.1.2.3"),
+ State: &ec2.InstanceState{Name: aws.String("running"), Code: aws.Int64(16)},
+ }, {
+ InstanceId: aws.String("i-124"),
+ InstanceLifecycle: aws.String("spot"),
+ InstanceType: aws.String("t2.micro"),
+ PrivateIpAddress: aws.String("10.1.2.4"),
+ State: &ec2.InstanceState{Name: aws.String("running"), Code: aws.Int64(16)},
+ }},
+ }},
+ }, nil
+}
+
+func (e *ec2stub) DescribeInstanceStatusPages(input *ec2.DescribeInstanceStatusInput, fn func(*ec2.DescribeInstanceStatusOutput, bool) bool) error {
+ fn(&ec2.DescribeInstanceStatusOutput{
+ InstanceStatuses: []*ec2.InstanceStatus{{
+ InstanceId: aws.String("i-123"),
+ AvailabilityZone: aws.String("aa-east-1a"),
+ }, {
+ InstanceId: aws.String("i-124"),
+ AvailabilityZone: aws.String("aa-east-1a"),
+ }},
+ }, true)
+ return nil
+}
+
+func (e *ec2stub) DescribeSpotPriceHistoryPages(input *ec2.DescribeSpotPriceHistoryInput, fn func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error {
+ if !fn(&ec2.DescribeSpotPriceHistoryOutput{
+ SpotPriceHistory: []*ec2.SpotPrice{
+ &ec2.SpotPrice{
+ InstanceType: aws.String("t2.micro"),
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.005"),
+ Timestamp: aws.Time(e.reftime.Add(-9 * time.Minute)),
+ },
+ &ec2.SpotPrice{
+ InstanceType: aws.String("t2.micro"),
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.015"),
+ Timestamp: aws.Time(e.reftime.Add(-5 * time.Minute)),
+ },
+ },
+ }, false) {
+ return nil
+ }
+ fn(&ec2.DescribeSpotPriceHistoryOutput{
+ SpotPriceHistory: []*ec2.SpotPrice{
+ &ec2.SpotPrice{
+ InstanceType: aws.String("t2.micro"),
+ AvailabilityZone: aws.String("aa-east-1a"),
+ SpotPrice: aws.String("0.01"),
+ Timestamp: aws.Time(e.reftime.Add(-2 * time.Minute)),
+ },
+ },
+ }, true)
+ return nil
}
func (e *ec2stub) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
@@ -85,7 +199,21 @@ func (e *ec2stub) TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.T
return nil, nil
}
-func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
+type ec2stubError struct {
+ code string
+ message string
+}
+
+func (err *ec2stubError) Code() string { return err.code }
+func (err *ec2stubError) Message() string { return err.message }
+func (err *ec2stubError) Error() string { return fmt.Sprintf("%s: %s", err.code, err.message) }
+func (err *ec2stubError) OrigErr() error { return errors.New("stub OrigErr") }
+
+// Ensure ec2stubError satisfies the awserr.Error interface
+var _ = awserr.Error(&ec2stubError{})
+
+func GetInstanceSet(c *check.C, conf string) (*ec2InstanceSet, cloud.ImageID, arvados.Cluster, *prometheus.Registry) {
+ reg := prometheus.NewRegistry()
cluster := arvados.Cluster{
InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
"tiny": {
@@ -98,7 +226,7 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
Preemptible: false,
},
"tiny-with-extra-scratch": {
- Name: "tiny",
+ Name: "tiny-with-extra-scratch",
ProviderType: "t2.micro",
VCPUs: 1,
RAM: 4000000000,
@@ -107,7 +235,7 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
AddedScratch: 20000000000,
},
"tiny-preemptible": {
- Name: "tiny",
+ Name: "tiny-preemptible",
ProviderType: "t2.micro",
VCPUs: 1,
RAM: 4000000000,
@@ -119,58 +247,51 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
if *live != "" {
var exampleCfg testConfig
err := config.LoadFile(&exampleCfg, *live)
- if err != nil {
- return nil, cloud.ImageID(""), cluster, err
- }
-
- ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger())
- return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
- }
- ap := ec2InstanceSet{
- ec2config: ec2InstanceSetConfig{},
- instanceSetID: "test123",
- logger: logrus.StandardLogger(),
- client: &ec2stub{},
- keys: make(map[string]string),
+ c.Assert(err, check.IsNil)
+
+ is, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", nil, logrus.StandardLogger(), reg)
+ c.Assert(err, check.IsNil)
+ return is.(*ec2InstanceSet), cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, reg
+ } else {
+ is, err := newEC2InstanceSet(json.RawMessage(conf), "test123", nil, ctxlog.TestLogger(c), reg)
+ c.Assert(err, check.IsNil)
+ is.(*ec2InstanceSet).client = &ec2stub{c: c, reftime: time.Now().UTC()}
+ return is.(*ec2InstanceSet), cloud.ImageID("blob"), cluster, reg
}
- return &ap, cloud.ImageID("blob"), cluster, nil
}
func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
- ap, img, cluster, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider", err)
- }
-
+ ap, img, cluster, _ := GetInstanceSet(c, "{}")
pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
- c.Assert(err, check.IsNil)
inst, err := ap.Create(cluster.InstanceTypes["tiny"],
img, map[string]string{
"TestTagName": "test tag value",
}, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
-
c.Assert(err, check.IsNil)
tags := inst.Tags()
c.Check(tags["TestTagName"], check.Equals, "test tag value")
c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
-}
+ if *live == "" {
+ c.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 1)
+ c.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 1)
-func (*EC2InstanceSetSuite) TestCreateWithExtraScratch(c *check.C) {
- ap, img, cluster, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider", err)
+ runcalls := ap.client.(*ec2stub).runInstancesCalls
+ if c.Check(runcalls, check.HasLen, 1) {
+ c.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, aws.String("enabled"))
+ c.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, aws.String("required"))
+ }
}
+}
- pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
- c.Assert(err, check.IsNil)
-
+func (*EC2InstanceSetSuite) TestCreateWithExtraScratch(c *check.C) {
+ ap, img, cluster, _ := GetInstanceSet(c, "{}")
inst, err := ap.Create(cluster.InstanceTypes["tiny-with-extra-scratch"],
img, map[string]string{
"TestTagName": "test tag value",
- }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+ }, "umask 0600; echo -n test-file-data >/var/run/test-file", nil)
c.Assert(err, check.IsNil)
@@ -178,16 +299,17 @@ func (*EC2InstanceSetSuite) TestCreateWithExtraScratch(c *check.C) {
c.Check(tags["TestTagName"], check.Equals, "test tag value")
c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+ if *live == "" {
+ // Should not have called key pair APIs, because
+ // publickey arg was nil
+ c.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 0)
+ c.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 0)
+ }
}
func (*EC2InstanceSetSuite) TestCreatePreemptible(c *check.C) {
- ap, img, cluster, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider", err)
- }
-
+ ap, img, cluster, _ := GetInstanceSet(c, "{}")
pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
- c.Assert(err, check.IsNil)
inst, err := ap.Create(cluster.InstanceTypes["tiny-preemptible"],
img, map[string]string{
@@ -202,12 +324,171 @@ func (*EC2InstanceSetSuite) TestCreatePreemptible(c *check.C) {
}
-func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
- ap, _, _, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider", err)
+func (*EC2InstanceSetSuite) TestCreateFailoverSecondSubnet(c *check.C) {
+ if *live != "" {
+ c.Skip("not applicable in live mode")
+ return
+ }
+
+ ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-good"]}`)
+ ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
+ "subnet-full": &ec2stubError{
+ code: "InsufficientFreeAddressesInSubnet",
+ message: "subnet is full",
+ },
+ }
+ inst, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+ c.Check(err, check.IsNil)
+ c.Check(inst, check.NotNil)
+ c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 2)
+ metrics := arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 1\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-good",success="0"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-good",success="1"} 1\n`+
+ `.*`)
+
+ // Next RunInstances call should try the working subnet first
+ inst, err = ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+ c.Check(err, check.IsNil)
+ c.Check(inst, check.NotNil)
+ c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 3)
+ metrics = arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 1\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-good",success="0"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-good",success="1"} 2\n`+
+ `.*`)
+}
+
+func (*EC2InstanceSetSuite) TestIsErrorSubnetSpecific(c *check.C) {
+ c.Check(isErrorSubnetSpecific(nil), check.Equals, false)
+ c.Check(isErrorSubnetSpecific(errors.New("misc error")), check.Equals, false)
+
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InsufficientInstanceCapacity",
+ }), check.Equals, true)
+
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InsufficientVolumeCapacity",
+ }), check.Equals, true)
+
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InsufficientFreeAddressesInSubnet",
+ message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
+ }), check.Equals, true)
+
+ // #21603: (Sometimes?) EC2 returns code InvalidParameterValue
+ // even though the code "InsufficientFreeAddressesInSubnet"
+ // seems like it must be meant for exactly this error.
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InvalidParameterValue",
+ message: "Not enough free addresses in subnet subnet-abcdefg\n\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789",
+ }), check.Equals, true)
+
+ // Similarly, AWS docs
+ // (https://repost.aws/knowledge-center/vpc-insufficient-ip-errors)
+ // suggest the following code/message combinations also exist.
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "Client.InvalidParameterValue",
+ message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ }), check.Equals, true)
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InvalidParameterValue",
+ message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ }), check.Equals, true)
+ // Meanwhile, other AWS docs
+ // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html)
+ // suggest Client.InvalidParameterValue is not a real code but
+ // ClientInvalidParameterValue is.
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "ClientInvalidParameterValue",
+ message: "There aren't sufficient free Ipv4 addresses or prefixes",
+ }), check.Equals, true)
+
+ c.Check(isErrorSubnetSpecific(&ec2stubError{
+ code: "InvalidParameterValue",
+ message: "Some other invalid parameter error",
+ }), check.Equals, false)
+}
+
+func (*EC2InstanceSetSuite) TestCreateAllSubnetsFailing(c *check.C) {
+ if *live != "" {
+ c.Skip("not applicable in live mode")
+ return
+ }
+
+ ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-broken"]}`)
+ ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
+ "subnet-full": &ec2stubError{
+ code: "InsufficientFreeAddressesInSubnet",
+ message: "subnet is full",
+ },
+ "subnet-broken": &ec2stubError{
+ code: "InvalidSubnetId.NotFound",
+ message: "bogus subnet id",
+ },
}
+ _, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+ c.Check(err, check.NotNil)
+ c.Check(err, check.ErrorMatches, `.*InvalidSubnetId\.NotFound.*`)
+ c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 2)
+ metrics := arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="0"} 1\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="1"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 1\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+ `.*`)
+
+ _, err = ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+ c.Check(err, check.NotNil)
+ c.Check(err, check.ErrorMatches, `.*InsufficientFreeAddressesInSubnet.*`)
+ c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 4)
+ metrics = arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="0"} 2\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="1"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 2\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+ `.*`)
+}
+func (*EC2InstanceSetSuite) TestCreateOneSubnetFailingCapacity(c *check.C) {
+ if *live != "" {
+ c.Skip("not applicable in live mode")
+ return
+ }
+ ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-broken"]}`)
+ ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
+ "subnet-full": &ec2stubError{
+ code: "InsufficientFreeAddressesInSubnet",
+ message: "subnet is full",
+ },
+ "subnet-broken": &ec2stubError{
+ code: "InsufficientInstanceCapacity",
+ message: "insufficient capacity",
+ },
+ }
+ for i := 0; i < 3; i++ {
+ _, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+ c.Check(err, check.NotNil)
+ c.Check(err, check.ErrorMatches, `.*InsufficientInstanceCapacity.*`)
+ }
+ c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 6)
+ metrics := arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="0"} 3\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="1"} 0\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 3\n`+
+ `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+ `.*`)
+}
+
+func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
+ ap, _, _, _ := GetInstanceSet(c, "{}")
l, err := ap.Instances(nil)
c.Assert(err, check.IsNil)
@@ -219,27 +500,23 @@ func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
}
func (*EC2InstanceSetSuite) TestListInstances(c *check.C) {
- ap, _, _, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider: ", err)
- }
-
+ ap, _, _, reg := GetInstanceSet(c, "{}")
l, err := ap.Instances(nil)
-
c.Assert(err, check.IsNil)
for _, i := range l {
tg := i.Tags()
c.Logf("%v %v %v", i.String(), i.Address(), tg)
}
+
+ metrics := arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `(?ms).*`+
+ `arvados_dispatchcloud_ec2_instances{subnet_id="[^"]*"} \d+\n`+
+ `.*`)
}
func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
- ap, _, _, err := GetInstanceSet()
- if err != nil {
- c.Fatal("Error making provider", err)
- }
-
+ ap, _, _, _ := GetInstanceSet(c, "{}")
l, err := ap.Instances(nil)
c.Assert(err, check.IsNil)
@@ -248,14 +525,94 @@ func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
}
}
+func (*EC2InstanceSetSuite) TestInstancePriceHistory(c *check.C) {
+ ap, img, cluster, _ := GetInstanceSet(c, "{}")
+ pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+ tags := cloud.InstanceTags{"arvados-ec2-driver": "test"}
+
+ defer func() {
+ instances, err := ap.Instances(tags)
+ c.Assert(err, check.IsNil)
+ for _, inst := range instances {
+ c.Logf("cleanup: destroy instance %s", inst)
+ c.Check(inst.Destroy(), check.IsNil)
+ }
+ }()
+
+ ap.ec2config.SpotPriceUpdateInterval = arvados.Duration(time.Hour)
+ ap.ec2config.EBSPrice = 0.1 // $/GiB/month
+ inst1, err := ap.Create(cluster.InstanceTypes["tiny-preemptible"], img, tags, "true", pk)
+ c.Assert(err, check.IsNil)
+ defer inst1.Destroy()
+ inst2, err := ap.Create(cluster.InstanceTypes["tiny-preemptible"], img, tags, "true", pk)
+ c.Assert(err, check.IsNil)
+ defer inst2.Destroy()
+
+ // in live mode, we need to wait for the instances to reach
+ // running state before we can discover their availability
+ // zones and look up the appropriate prices.
+ var instances []cloud.Instance
+ for deadline := time.Now().Add(5 * time.Minute); ; {
+ if deadline.Before(time.Now()) {
+ c.Fatal("timed out")
+ }
+ instances, err = ap.Instances(tags)
+ running := 0
+ for _, inst := range instances {
+ ec2i := inst.(*ec2Instance).instance
+ if *ec2i.InstanceLifecycle == "spot" && *ec2i.State.Code&16 != 0 {
+ running++
+ }
+ }
+ if running >= 2 {
+ c.Logf("instances are running, and identifiable as spot instances")
+ break
+ }
+ c.Logf("waiting for instances to reach running state so their availability zone becomes visible...")
+ time.Sleep(10 * time.Second)
+ }
+
+ for _, inst := range instances {
+ hist := inst.PriceHistory(arvados.InstanceType{})
+ c.Logf("%s price history: %v", inst.ID(), hist)
+ c.Check(len(hist) > 0, check.Equals, true)
+
+ histWithScratch := inst.PriceHistory(arvados.InstanceType{AddedScratch: 640 << 30})
+ c.Logf("%s price history with 640 GiB scratch: %v", inst.ID(), histWithScratch)
+
+ for i, ip := range hist {
+ c.Check(ip.Price, check.Not(check.Equals), 0.0)
+ if i > 0 {
+ c.Check(ip.StartTime.Before(hist[i-1].StartTime), check.Equals, true)
+ }
+ c.Check(ip.Price < histWithScratch[i].Price, check.Equals, true)
+ }
+ }
+}
+
func (*EC2InstanceSetSuite) TestWrapError(c *check.C) {
retryError := awserr.New("Throttling", "", nil)
wrapped := wrapError(retryError, &atomic.Value{})
_, ok := wrapped.(cloud.RateLimitError)
c.Check(ok, check.Equals, true)
- quotaError := awserr.New("InsufficientInstanceCapacity", "", nil)
+ quotaError := awserr.New("InstanceLimitExceeded", "", nil)
wrapped = wrapError(quotaError, nil)
_, ok = wrapped.(cloud.QuotaError)
c.Check(ok, check.Equals, true)
+
+ for _, trial := range []struct {
+ code string
+ msg string
+ }{
+ {"InsufficientInstanceCapacity", ""},
+ {"Unsupported", "Your requested instance type (t3.micro) is not supported in your requested Availability Zone (us-east-1e). Please retry your request by not specifying an Availability Zone or choosing us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1f."},
+ } {
+ capacityError := awserr.New(trial.code, trial.msg, nil)
+ wrapped = wrapError(capacityError, nil)
+ caperr, ok := wrapped.(cloud.CapacityError)
+ c.Check(ok, check.Equals, true)
+ c.Check(caperr.IsCapacityError(), check.Equals, true)
+ c.Check(caperr.IsInstanceTypeSpecific(), check.Equals, true)
+ }
}
diff --git a/lib/cloud/interfaces.go b/lib/cloud/interfaces.go
index 2d53a49c51..a2aa9e1432 100644
--- a/lib/cloud/interfaces.go
+++ b/lib/cloud/interfaces.go
@@ -11,6 +11,7 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
@@ -36,6 +37,20 @@ type QuotaError interface {
error
}
+// A CapacityError should be returned by an InstanceSet's Create
+// method when the cloud service indicates it has insufficient
+// capacity to create new instances -- i.e., we shouldn't retry right
+// away.
+type CapacityError interface {
+ // If true, wait before trying to create more instances.
+ IsCapacityError() bool
+ // If true, the condition is specific to the requested
+ // instance types. Wait before trying to create more
+ // instances of that same type.
+ IsInstanceTypeSpecific() bool
+ error
+}
+
type SharedResourceTags map[string]string
type InstanceSetID string
type InstanceTags map[string]string
@@ -102,6 +117,12 @@ type Instance interface {
// Replace tags with the given tags
SetTags(InstanceTags) error
+ // Get recent price history, if available. The InstanceType is
+ // supplied as an argument so the driver implementation can
+ // account for AddedScratch cost without requesting the volume
+ // attachment information from the provider's API.
+ PriceHistory(arvados.InstanceType) []InstancePrice
+
// Shut down the node
Destroy() error
}
@@ -141,6 +162,11 @@ type InstanceSet interface {
Stop()
}
+type InstancePrice struct {
+ StartTime time.Time
+ Price float64
+}
+
type InitCommand string
// A Driver returns an InstanceSet that uses the given InstanceSetID
@@ -180,7 +206,7 @@ type InitCommand string
//
// type exampleDriver struct {}
//
-// func (*exampleDriver) InstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+// func (*exampleDriver) InstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {
// var is exampleInstanceSet
// if err := json.Unmarshal(config, &is); err != nil {
// return nil, err
@@ -188,20 +214,18 @@ type InitCommand string
// is.ownID = id
// return &is, nil
// }
-//
-// var _ = registerCloudDriver("example", &exampleDriver{})
type Driver interface {
- InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
+ InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)
}
// DriverFunc makes a Driver using the provided function as its
// InstanceSet method. This is similar to http.HandlerFunc.
-func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
+func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)) Driver {
return driverFunc(fn)
}
-type driverFunc func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error)
+type driverFunc func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)
-func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger) (InstanceSet, error) {
- return df(config, id, tags, logger)
+func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error) {
+ return df(config, id, tags, logger, reg)
}
diff --git a/lib/cloud/loopback/loopback.go b/lib/cloud/loopback/loopback.go
index 6ad4f876d9..41878acd22 100644
--- a/lib/cloud/loopback/loopback.go
+++ b/lib/cloud/loopback/loopback.go
@@ -11,6 +11,7 @@ import (
"encoding/json"
"errors"
"io"
+ "os"
"os/exec"
"os/user"
"strings"
@@ -20,6 +21,7 @@ import (
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
@@ -44,7 +46,7 @@ type instanceSet struct {
mtx sync.Mutex
}
-func newInstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+func newInstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {
is := &instanceSet{
instanceSetID: instanceSetID,
logger: logger,
@@ -58,6 +60,16 @@ func (is *instanceSet) Create(it arvados.InstanceType, _ cloud.ImageID, tags clo
if len(is.instances) > 0 {
return nil, errQuota
}
+ // A crunch-run process running in a previous instance may
+ // have marked the node as broken. In the loopback scenario a
+ // destroy+create cycle doesn't fix whatever was broken -- but
+ // nothing else will either, so the best we can do is remove
+ // the "broken" flag and try again.
+ if err := os.Remove("/var/lock/crunch-run-broken"); err == nil {
+ is.logger.Info("removed /var/lock/crunch-run-broken")
+ } else if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
u, err := user.Current()
if err != nil {
return nil, err
@@ -119,12 +131,13 @@ type instance struct {
sshService test.SSHService
}
-func (i *instance) ID() cloud.InstanceID { return cloud.InstanceID(i.instanceType.ProviderType) }
-func (i *instance) String() string { return i.instanceType.ProviderType }
-func (i *instance) ProviderType() string { return i.instanceType.ProviderType }
-func (i *instance) Address() string { return i.sshService.Address() }
-func (i *instance) RemoteUser() string { return i.adminUser }
-func (i *instance) Tags() cloud.InstanceTags { return i.tags }
+func (i *instance) ID() cloud.InstanceID { return cloud.InstanceID(i.instanceType.ProviderType) }
+func (i *instance) String() string { return i.instanceType.ProviderType }
+func (i *instance) ProviderType() string { return i.instanceType.ProviderType }
+func (i *instance) Address() string { return i.sshService.Address() }
+func (i *instance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice { return nil }
+func (i *instance) RemoteUser() string { return i.adminUser }
+func (i *instance) Tags() cloud.InstanceTags { return i.tags }
func (i *instance) SetTags(tags cloud.InstanceTags) error {
i.tags = tags
return nil
diff --git a/lib/cloud/loopback/loopback_test.go b/lib/cloud/loopback/loopback_test.go
index 5c30f5f0e1..0716179cb7 100644
--- a/lib/cloud/loopback/loopback_test.go
+++ b/lib/cloud/loopback/loopback_test.go
@@ -29,7 +29,7 @@ var _ = check.Suite(&suite{})
func (*suite) TestCreateListExecDestroy(c *check.C) {
logger := ctxlog.TestLogger(c)
- is, err := Driver.InstanceSet(json.RawMessage("{}"), "testInstanceSetID", cloud.SharedResourceTags{"sharedTag": "sharedTagValue"}, logger)
+ is, err := Driver.InstanceSet(json.RawMessage("{}"), "testInstanceSetID", cloud.SharedResourceTags{"sharedTag": "sharedTagValue"}, logger, nil)
c.Assert(err, check.IsNil)
clientRSAKey, err := rsa.GenerateKey(rand.Reader, 1024)
diff --git a/lib/cloud/price.go b/lib/cloud/price.go
new file mode 100644
index 0000000000..59f5afc94b
--- /dev/null
+++ b/lib/cloud/price.go
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloud
+
+import (
+ "sort"
+)
+
+// NormalizePriceHistory de-duplicates and sorts instance prices, most
+// recent first.
+func NormalizePriceHistory(prices []InstancePrice) []InstancePrice {
+ // copy provided slice instead of modifying it in place
+ prices = append([]InstancePrice(nil), prices...)
+ // sort by timestamp, newest first
+ sort.Slice(prices, func(i, j int) bool {
+ return prices[i].StartTime.After(prices[j].StartTime)
+ })
+ // remove duplicate data points, keeping the oldest
+ for i := 0; i < len(prices)-1; i++ {
+ if prices[i].StartTime == prices[i+1].StartTime || prices[i].Price == prices[i+1].Price {
+ prices = append(prices[:i], prices[i+1:]...)
+ i--
+ }
+ }
+ return prices
+}
diff --git a/lib/cloud/price_test.go b/lib/cloud/price_test.go
new file mode 100644
index 0000000000..e2a4a7e13c
--- /dev/null
+++ b/lib/cloud/price_test.go
@@ -0,0 +1,32 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloud
+
+import (
+ "testing"
+ "time"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type cloudSuite struct{}
+
+var _ = Suite(&cloudSuite{})
+
+func (s *cloudSuite) TestNormalizePriceHistory(c *C) {
+ t0, err := time.Parse(time.RFC3339, "2023-01-01T01:00:00Z")
+ c.Assert(err, IsNil)
+ h := []InstancePrice{
+ {t0.Add(1 * time.Minute), 1.0},
+ {t0.Add(4 * time.Minute), 1.2}, // drop: unchanged price
+ {t0.Add(5 * time.Minute), 1.1},
+ {t0.Add(3 * time.Minute), 1.2},
+ {t0.Add(5 * time.Minute), 1.1}, // drop: duplicate
+ {t0.Add(2 * time.Minute), 1.0}, // drop: out of order, unchanged price
+ }
+ c.Check(NormalizePriceHistory(h), DeepEquals, []InstancePrice{h[2], h[3], h[0]})
+}
diff --git a/lib/cmd/cmd.go b/lib/cmd/cmd.go
index a03cb90f68..40e80f5eaa 100644
--- a/lib/cmd/cmd.go
+++ b/lib/cmd/cmd.go
@@ -14,12 +14,15 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "runtime/debug"
"sort"
"strings"
"github.com/sirupsen/logrus"
)
+const EXIT_INVALIDARGUMENT = 2
+
type Handler interface {
RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int
}
@@ -35,7 +38,13 @@ func (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, std
// 0.
var Version versionCommand
-var version = "dev"
+var (
+ // These default version/commit strings should be set at build
+ // time: `go install -buildvcs=false -ldflags "-X
+ // git.arvados.org/arvados.git/lib/cmd.version=1.2.3"`
+ version = "dev"
+ commit = "0000000000000000000000000000000000000000"
+)
type versionCommand struct{}
@@ -43,6 +52,17 @@ func (versionCommand) String() string {
return fmt.Sprintf("%s (%s)", version, runtime.Version())
}
+func (versionCommand) Commit() string {
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ for _, bs := range bi.Settings {
+ if bs.Key == "vcs.revision" {
+ return bs.Value
+ }
+ }
+ }
+ return commit
+}
+
func (versionCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
fmt.Fprintf(stdout, "%s %s (%s)\n", prog, version, runtime.Version())
@@ -55,12 +75,12 @@ func (versionCommand) RunCommand(prog string, args []string, stdin io.Reader, st
//
// Example:
//
-// os.Exit(Multi(map[string]Handler{
-// "foobar": HandlerFunc(func(prog string, args []string) int {
-// fmt.Println(args[0])
-// return 2
-// }),
-// })("/usr/bin/multi", []string{"foobar", "baz"}, os.Stdin, os.Stdout, os.Stderr))
+// os.Exit(Multi(map[string]Handler{
+// "foobar": HandlerFunc(func(prog string, args []string) int {
+// fmt.Println(args[0])
+// return 2
+// }),
+// })("/usr/bin/multi", []string{"foobar", "baz"}, os.Stdin, os.Stdout, os.Stderr))
//
// ...prints "baz" and exits 2.
type Multi map[string]Handler
@@ -86,13 +106,13 @@ func (m Multi) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
} else if len(args) < 1 {
fmt.Fprintf(stderr, "usage: %s command [args]\n", prog)
m.Usage(stderr)
- return 2
+ return EXIT_INVALIDARGUMENT
} else if cmd, ok = m[args[0]]; ok {
return cmd.RunCommand(prog+" "+args[0], args[1:], stdin, stdout, stderr)
} else {
fmt.Fprintf(stderr, "%s: unrecognized command %q\n", prog, args[0])
m.Usage(stderr)
- return 2
+ return EXIT_INVALIDARGUMENT
}
}
diff --git a/lib/cmd/parseflags.go b/lib/cmd/parseflags.go
index 3e872fcd11..275e063f31 100644
--- a/lib/cmd/parseflags.go
+++ b/lib/cmd/parseflags.go
@@ -8,8 +8,13 @@ import (
"flag"
"fmt"
"io"
+ "reflect"
)
+// Hack to enable checking whether a given FlagSet's Usage method is
+// the (private) default one.
+var defaultFlagSet = flag.NewFlagSet("none", flag.ContinueOnError)
+
// ParseFlags calls f.Parse(args) and prints appropriate error/help
// messages to stderr.
//
@@ -30,11 +35,16 @@ func ParseFlags(f FlagSet, prog string, args []string, positional string, stderr
case nil:
if f.NArg() > 0 && positional == "" {
fmt.Fprintf(stderr, "unrecognized command line arguments: %v (try -help)\n", f.Args())
- return false, 2
+ return false, EXIT_INVALIDARGUMENT
}
return true, 0
case flag.ErrHelp:
- if f, ok := f.(*flag.FlagSet); ok && f.Usage != nil {
+ // Use our own default usage func, not the one
+ // provided by the flag pkg, if the caller hasn't set
+ // one. (We use reflect to determine whether f.Usage
+ // is the private defaultUsage func that
+ // flag.NewFlagSet uses.)
+ if f, ok := f.(*flag.FlagSet); ok && f.Usage != nil && reflect.ValueOf(f.Usage).String() != reflect.ValueOf(defaultFlagSet.Usage).String() {
f.SetOutput(stderr)
f.Usage()
} else {
@@ -45,6 +55,6 @@ func ParseFlags(f FlagSet, prog string, args []string, positional string, stderr
return false, 0
default:
fmt.Fprintf(stderr, "error parsing command line arguments: %s (try -help)\n", err)
- return false, 2
+ return false, EXIT_INVALIDARGUMENT
}
}
diff --git a/lib/config/cmd_test.go b/lib/config/cmd_test.go
index 9503a54d2d..c2854895ca 100644
--- a/lib/config/cmd_test.go
+++ b/lib/config/cmd_test.go
@@ -33,7 +33,7 @@ func (s *CommandSuite) SetUpSuite(c *check.C) {
func (s *CommandSuite) TestDump_BadArg(c *check.C) {
var stderr bytes.Buffer
code := DumpCommand.RunCommand("arvados config-dump", []string{"-badarg"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)
- c.Check(code, check.Equals, 2)
+ c.Check(code, check.Equals, cmd.EXIT_INVALIDARGUMENT)
c.Check(stderr.String(), check.Equals, "error parsing command line arguments: flag provided but not defined: -badarg (try -help)\n")
}
@@ -69,8 +69,6 @@ Clusters:
Type: select
Options:
fuchsia: {}
- ApplicationMimetypesWithViewIcon:
- whitespace: {}
`
code := CheckCommand.RunCommand("arvados config-check", []string{"-config", "-"}, bytes.NewBufferString(in), &stdout, &stderr)
c.Check(code, check.Equals, 0)
@@ -124,8 +122,6 @@ Clusters:
Type: select
Options:
fuchsia: {}
- ApplicationMimetypesWithViewIcon:
- whitespace: {}
`
code := CheckCommand.RunCommand("arvados config-check", []string{"-config", "-"}, bytes.NewBufferString(in), &stdout, &stderr)
c.Check(code, check.Equals, 1)
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index b23c6a1274..14e839a6cd 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -223,9 +223,48 @@ Clusters:
# parameter higher than this value, this value is used instead.
MaxItemsPerResponse: 1000
- # Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit.
- MaxConcurrentRequests: 0
+ # Maximum number of concurrent requests to process concurrently
+ # in a single service process, or 0 for no limit.
+ #
+ # Note this applies to all Arvados services (controller, webdav,
+ # websockets, etc.). Concurrency in the controller service is
+ # also effectively limited by MaxConcurrentRailsRequests (see
+ # below) because most controller requests proxy through to the
+ # RailsAPI service.
+ #
+ # HTTP proxies and load balancers downstream of arvados services
+ # should be configured to allow at least {MaxConcurrentRequest +
+ # MaxQueuedRequests + MaxGatewayTunnels} concurrent requests.
+ MaxConcurrentRequests: 64
+
+ # Maximum number of concurrent requests to process concurrently
+ # in a single RailsAPI service process, or 0 for no limit.
+ MaxConcurrentRailsRequests: 8
+
+ # Maximum number of incoming requests to hold in a priority
+ # queue waiting for one of the MaxConcurrentRequests slots to be
+ # free. When the queue is longer than this, respond 503 to the
+ # lowest priority request.
+ #
+ # If MaxQueuedRequests is 0, respond 503 immediately to
+ # additional requests while at the MaxConcurrentRequests limit.
+ MaxQueuedRequests: 128
+
+ # Maximum time a "lock container" request is allowed to wait in
+ # the incoming request queue before returning 503.
+ MaxQueueTimeForLockRequests: 2s
+
+ # Maximum number of active gateway tunnel connections. One slot
+ # is consumed by each "container shell" connection. If using an
+ # HPC dispatcher (LSF or Slurm), one slot is consumed by each
+ # running container. These do not count toward
+ # MaxConcurrentRequests.
+ MaxGatewayTunnels: 1000
+
+ # Fraction of MaxConcurrentRequests that can be "log create"
+ # messages at any given time. This is to prevent logging
+ # updates from crowding out more important requests.
+ LogCreateRequestFraction: 0.50
# Maximum number of 64MiB memory buffers per Keepstore server process, or
# 0 for no limit. When this limit is reached, up to
@@ -288,6 +327,9 @@ Clusters:
# any user with "manage" permission can un-freeze.
UnfreezeProjectRequiresAdmin: false
+ # (Experimental) Use row-level locking on update API calls.
+ LockBeforeUpdate: false
+
Users:
# Config parameters to automatically setup new users. If enabled,
# this users will be able to self-activate. Enable this if you want
@@ -323,34 +365,59 @@ Clusters:
# false.
ActivatedUsersAreVisibleToOthers: true
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
+ # If a user creates an account with this email address, they
+ # will be automatically set to admin.
AutoAdminUserWithEmail: ""
# If AutoAdminFirstUser is set to true, the first user to log in when no
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false
- # Email address to notify whenever a user creates a profile for the
- # first time
+ # Recipient for notification email sent out when a user sets a
+ # profile on their account.
UserProfileNotificationAddress: ""
+
+ # When sending a NewUser, NewInactiveUser, or UserProfile
+ # notification, this is the 'From' address to use
AdminNotifierEmailFrom: arvados@example.com
+
+ # Prefix for email subjects for NewUser and NewInactiveUser emails
EmailSubjectPrefix: "[ARVADOS] "
+
+ # When sending a welcome email to the user, the 'From' address to use
UserNotifierEmailFrom: arvados@example.com
- UserNotifierEmailBcc: {}
- NewUserNotificationRecipients: {}
- NewInactiveUserNotificationRecipients: {}
+
+ # The welcome email sent to new users will be blind copied to
+ # these addresses.
+ UserNotifierEmailBcc:
+ SAMPLE: {}
+
+ # Recipients for notification email sent out when a user account
+ # is created and already set up to be able to log in
+ NewUserNotificationRecipients:
+ SAMPLE: {}
+
+ # Recipients for notification email sent out when a user account
+ # has been created but the user cannot log in until they are
+ # set up by an admin.
+ NewInactiveUserNotificationRecipients:
+ SAMPLE: {}
# Set AnonymousUserToken to enable anonymous user access. Populate this
# field with a random string at least 50 characters long.
AnonymousUserToken: ""
- # If a new user has an alternate email address (local@domain)
- # with the domain given here, its local part becomes the new
- # user's default username. Otherwise, the user's primary email
- # address is used.
+ # The login provider for a user may supply a primary email
+ # address and one or more alternate email addresses. If a new
+ # user has an alternate email address with the domain given
+ # here, use the username from the alternate email to generate
+ # the user's Arvados username. Otherwise, the username from
+ # user's primary email address is used for the Arvados username.
+ # Currently implemented for OpenID Connect only.
PreferDomainForUsername: ""
+ # Ruby ERB template used for the email sent out to users when
+ # they have been set up.
UserSetupMailText: |
<% if not @user.full_name.empty? -%>
<%= @user.full_name %>,
@@ -373,6 +440,66 @@ Clusters:
# cluster.
RoleGroupsVisibleToAll: true
+ # If CanCreateRoleGroups is true, regular (non-admin) users can
+ # create new role groups.
+ #
+ # If false, only admins can create new role groups.
+ CanCreateRoleGroups: true
+
+ # During each period, a log entry with event_type="activity"
+ # will be recorded for each user who is active during that
+ # period. The object_uuid attribute will indicate the user's
+ # UUID.
+ #
+ # Multiple log entries for the same user may be generated during
+ # a period if there are multiple controller processes or a
+ # controller process is restarted.
+ #
+ # Use 0 to disable activity logging.
+ ActivityLoggingPeriod: 24h
+
+ # The SyncUser* options control what system resources are managed by
+ # arvados-login-sync on shell nodes. They correspond to:
+ # * SyncUserAccounts: The user's Unix account on the shell node
+ # * SyncUserGroups: The group memberships of that account
+ # * SyncUserSSHKeys: Whether to authorize the user's Arvados SSH keys
+ # * SyncUserAPITokens: Whether to set up the user's Arvados API token
+ # All default to true.
+ SyncUserAccounts: true
+ SyncUserGroups: true
+ SyncUserSSHKeys: true
+ SyncUserAPITokens: true
+
+ # If SyncUserGroups=true, then arvados-login-sync will ensure that all
+ # managed accounts are members of the Unix groups listed in
+ # SyncRequiredGroups, in addition to any groups listed in their Arvados
+ # login permission. The default list includes the "fuse" group so
+ # users can use arv-mount. You can require no groups by specifying an
+ # empty list (i.e., `SyncRequiredGroups: []`).
+ SyncRequiredGroups:
+ - fuse
+
+ # SyncIgnoredGroups is a list of group names. arvados-login-sync will
+ # never modify these groups. If user login permissions list any groups
+ # in SyncIgnoredGroups, they will be ignored. If a user's Unix account
+ # belongs to any of these groups, arvados-login-sync will not remove
+ # the account from that group. The default is a set of particularly
+ # security-sensitive groups across Debian- and Red Hat-based
+ # distributions.
+ SyncIgnoredGroups:
+ - adm
+ - disk
+ - kmem
+ - mem
+ - root
+ - shadow
+ - staff
+ - sudo
+ - sys
+ - utempter
+ - utmp
+ - wheel
+
AuditLogs:
# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
@@ -416,6 +543,15 @@ Clusters:
# params_truncated.
MaxRequestLogParamsSize: 2000
+ # In all services except RailsAPI, periodically check whether
+ # the incoming HTTP request queue is nearly full (see
+ # MaxConcurrentRequests) and, if so, write a snapshot of the
+ # request queue to {service}-requests.json in the specified
+ # directory.
+ #
+ # Leave blank to disable.
+ RequestQueueDumpDirectory: ""
+
Collections:
# Enable access controls for data stored in Keep. This should
@@ -508,7 +644,7 @@ Clusters:
#
# If SIGUSR1 is received during an idle period between operations,
# the next operation will start immediately.
- BalancePeriod: 10m
+ BalancePeriod: 6h
# Limits the number of collections retrieved by keep-balance per
# API transaction. If this is zero, page size is
@@ -517,11 +653,12 @@ Clusters:
BalanceCollectionBatch: 0
# The size of keep-balance's internal queue of
- # collections. Higher values use more memory and improve throughput
- # by allowing keep-balance to fetch the next page of collections
- # while the current page is still being processed. If this is zero
- # or omitted, pages are processed serially.
- BalanceCollectionBuffers: 1000
+ # collections. Higher values may improve throughput by allowing
+ # keep-balance to fetch collections from the database while the
+      # current collections are still being processed, at the expense of
+ # using more memory. If this is zero or omitted, pages are
+ # processed serially.
+ BalanceCollectionBuffers: 4
# Maximum time for a rebalancing run. This ensures keep-balance
# eventually gives up and retries if, for example, a network
@@ -537,6 +674,15 @@ Clusters:
# once.
BalanceUpdateLimit: 100000
+ # Maximum number of "pull block from other server" and "trash
+ # block" requests to send to each keepstore server at a
+ # time. Smaller values use less memory in keepstore and
+ # keep-balance. Larger values allow more progress per
+ # keep-balance iteration. A zero value computes all of the
+ # needed changes but does not apply any.
+ BalancePullLimit: 100000
+ BalanceTrashLimit: 100000
+
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than BlobSigningTTL.
DefaultTrashLifetime: 336h
@@ -611,20 +757,18 @@ Clusters:
# Time to cache manifests, permission checks, and sessions.
TTL: 300s
- # Time to cache collection state.
- UUIDTTL: 5s
-
- # Block cache entries. Each block consumes up to 64 MiB RAM.
- MaxBlockEntries: 20
-
- # Collection cache entries.
- MaxCollectionEntries: 1000
+ # Maximum amount of data cached in /var/cache/arvados/keep.
+ # Can be given as a percentage ("10%") or a number of bytes
+ # ("10 GiB")
+ DiskCacheSize: 10%
- # Approximate memory limit (in bytes) for collection cache.
- MaxCollectionBytes: 100000000
-
- # UUID cache entries.
- MaxUUIDEntries: 1000
+ # Approximate memory limit (in bytes) for session cache.
+ #
+ # Note this applies to the in-memory representation of
+ # projects and collections -- metadata, block locators,
+ # filenames, etc. -- not the file data itself (see
+ # DiskCacheSize).
+ MaxCollectionBytes: 100 MB
# Persistent sessions.
MaxSessions: 100
@@ -657,6 +801,14 @@ Clusters:
# load on the API server and you don't need it.
WebDAVLogEvents: true
+ # Per-connection output buffer for WebDAV downloads. May improve
+ # throughput for large files, particularly when storage volumes
+ # have high latency.
+ #
+      # Size may be specified as a number of bytes ("0") or with units
+ # ("128KiB", "1 MB").
+ WebDAVOutputBuffer: 0
+
Login:
# One of the following mechanisms (Google, PAM, LDAP, or
# LoginCluster) should be enabled; see
@@ -719,7 +871,7 @@ Clusters:
# OpenID claim field containing the email verification
# flag. Normally "email_verified". To accept every returned
# email address without checking a "verified" field at all,
- # use the empty string "".
+ # use an empty string "".
EmailVerifiedClaim: "email_verified"
# OpenID claim field containing the user's preferred
@@ -789,6 +941,16 @@ Clusters:
# Skip TLS certificate name verification.
InsecureTLS: false
+      # Minimum TLS version to negotiate when connecting to server
+ # (ldaps://... or StartTLS). It may be necessary to set this
+ # to "1.1" for compatibility with older LDAP servers that fail
+ # with 'LDAP Result Code 200 "Network Error": TLS handshake
+ # failed (tls: server selected unsupported protocol version
+ # 301)'.
+ #
+ # If blank, use the recommended minimum version (1.2).
+ MinTLSVersion: ""
+
# Strip the @domain part if a user supplies an email-style
# username with this domain. If "*", strip any user-provided
# domain. If "", never strip the domain part. Example:
@@ -870,16 +1032,31 @@ Clusters:
# by going through login again.
IssueTrustedTokens: true
- # When the token is returned to a client, the token itself may
- # be restricted from viewing/creating other tokens based on whether
- # the client is "trusted" or not. The local Workbench1 and
- # Workbench2 are trusted by default, but if this is a
- # LoginCluster, you probably want to include the other Workbench
- # instances in the federation in this list.
+ # Origins (scheme://host[:port]) of clients trusted to receive
+ # new tokens via login process. The ExternalURLs of the local
+ # Workbench1 and Workbench2 are trusted implicitly and do not
+ # need to be listed here. If this is a LoginCluster, you
+ # probably want to include the other Workbench instances in the
+ # federation in this list.
+ #
+ # A wildcard like "https://*.example" will match client URLs
+ # like "https://a.example" and "https://a.b.c.example".
+ #
+ # Example:
+ #
+ # TrustedClients:
+ # "https://workbench.other-cluster.example": {}
+ # "https://workbench2.other-cluster.example": {}
TrustedClients:
- SAMPLE:
- "https://workbench.federate1.example": {}
- "https://workbench.federate2.example": {}
+ SAMPLE: {}
+
+ # Treat any origin whose host part is "localhost" or a private
+ # IP address (e.g., http://10.0.0.123:3000/) as if it were
+ # listed in TrustedClients.
+ #
+ # Intended only for test/development use. Not appropriate for
+ # production use.
+ TrustPrivateNetworks: false
Git:
# Path to git or gitolite-shell executable. Each authenticated
@@ -945,12 +1122,24 @@ Clusters:
# troubleshooting purposes.
LogReuseDecisions: false
- # Default value for keep_cache_ram of a container's runtime_constraints.
- DefaultKeepCacheRAM: 268435456
+ # Default value for keep_cache_ram of a container's
+ # runtime_constraints. Note: this gets added to the RAM request
+ # used to allocate a VM or submit an HPC job.
+ #
+ # If this is zero, container requests that don't specify RAM or
+ # disk cache size will use a disk cache, sized to the
+ # container's RAM requirement (but with minimum 2 GiB and
+ # maximum 32 GiB).
+ #
+ # Note: If you change this value, containers that used the previous
+ # default value will only be reused by container requests that
+ # explicitly specify the previous value in their keep_cache_ram
+ # runtime constraint.
+ DefaultKeepCacheRAM: 0
# Number of times a container can be unlocked before being
# automatically cancelled.
- MaxDispatchAttempts: 5
+ MaxDispatchAttempts: 10
# Default value for container_count_max for container requests. This is the
# number of times Arvados will create a new container to satisfy a container
@@ -959,13 +1148,6 @@ Clusters:
# with the cancelled container.
MaxRetryAttempts: 3
- # The maximum number of compute nodes that can be in use simultaneously
- # If this limit is reduced, any existing nodes with slot number >= new limit
- # will not be counted against the new limit. In other words, the new limit
- # won't be strictly enforced until those nodes with higher slot numbers
- # go down.
- MaxComputeVMs: 64
-
# Schedule all child containers on preemptible instances (e.g. AWS
# Spot Instances) even if not requested by the submitter.
#
@@ -985,10 +1167,25 @@ Clusters:
# A price factor of 1.0 is a reasonable starting point.
PreemptiblePriceFactor: 0
+ # When the lowest-priced instance type for a given container is
+ # not available, try other instance types, up to the indicated
+ # maximum price factor.
+ #
+ # For example, with AvailabilityPriceFactor 1.5, if the
+ # lowest-cost instance type A suitable for a given container
+ # costs $2/h, Arvados may run the container on any instance type
+ # B costing $3/h or less when instance type A is not available
+ # or an idle instance of type B is already running.
+ MaximumPriceFactor: 1.5
+
# PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
# cloud dispatcher for executing containers on worker VMs.
# Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
# and ends with "\n-----END RSA PRIVATE KEY-----\n".
+ #
+ # Use "file:///absolute/path/to/key" to load the key from a
+ # separate file instead of embedding it in the configuration
+ # file.
DispatchPrivateKey: ""
# Maximum time to wait for workers to come up before abandoning
@@ -1008,7 +1205,7 @@ Clusters:
# Extra RAM to reserve on the node, in addition to
# the amount specified in the container's RuntimeConstraints
- ReserveExtraRAM: 256MiB
+ ReserveExtraRAM: 550MiB
# Minimum time between two attempts to run the same container
MinRetryPeriod: 0s
@@ -1063,12 +1260,16 @@ Clusters:
LocalKeepLogsToContainerLog: none
Logging:
- # When you run the db:delete_old_container_logs task, it will find
- # containers that have been finished for at least this many seconds,
+ # Periodically (see SweepInterval) Arvados will check for
+ # containers that have been finished for at least this long,
# and delete their stdout, stderr, arv-mount, crunch-run, and
# crunchstat logs from the logs table.
MaxAge: 720h
+ # How often to delete cached log entries for finished
+ # containers (see MaxAge).
+ SweepInterval: 12h
+
# These two settings control how frequently log events are flushed to the
# database. Log lines are buffered until either crunch_log_bytes_per_event
# has been reached or crunch_log_seconds_between_events has elapsed since
@@ -1087,9 +1288,14 @@ Clusters:
# before being silenced until the end of the period.
LogThrottleLines: 1024
- # Maximum bytes that may be logged by a single job. Log bytes that are
- # silenced by throttling are not counted against this total.
- LimitLogBytesPerJob: 67108864
+ # Maximum bytes that may be logged as legacy log events
+ # (records posted to the "logs" table). Starting with Arvados
+ # 2.7, container live logging has migrated to a new system
+ # (polling the container request live log endpoint) and this
+ # value should be 0. As of this writing, the container will
+      # still create a single log on the API server, noting that
+      # log events are throttled.
+ LimitLogBytesPerJob: 0
LogPartialLineThrottlePeriod: 5s
@@ -1181,15 +1387,23 @@ Clusters:
# %M memory in MB
# %T tmp in MB
# %G number of GPU devices (runtime_constraints.cuda.device_count)
+ # %W maximum run time in minutes (see MaxRunTimeOverhead and
+ # MaxRunTimeDefault below)
#
- # Use %% to express a literal %. The %%J in the default will be changed
- # to %J, which is interpreted by bsub itself.
+ # Use %% to express a literal %. For example, the %%J in the
+ # default argument list will be changed to %J, which is
+ # interpreted by bsub itself.
#
# Note that the default arguments cause LSF to write two files
# in /tmp on the compute node each time an Arvados container
# runs. Ensure you have something in place to delete old files
# from /tmp, or adjust the "-o" and "-e" arguments accordingly.
- BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]", "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]"]
+ #
+ # If ["-We", "%W"] or ["-W", "%W"] appear in this argument
+ # list, and MaxRunTimeDefault is not set (see below), both of
+ # those arguments will be dropped from the argument list when
+ # running a container that has no max_run_time value.
+ BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]", "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]", "-We", "%W"]
# Arguments that will be appended to the bsub command line
# when submitting Arvados containers as LSF jobs with
@@ -1204,6 +1418,19 @@ Clusters:
# Arvados LSF dispatcher runs ("submission host").
BsubSudoUser: "crunch"
+ # When passing the scheduling_constraints.max_run_time value
+ # to LSF via "%W", add this much time to account for
+ # crunch-run startup/shutdown overhead.
+ MaxRunTimeOverhead: 5m
+
+ # If non-zero, MaxRunTimeDefault is used as the default value
+ # for max_run_time for containers that do not specify a time
+ # limit. MaxRunTimeOverhead will be added to this.
+ #
+ # Example:
+ # MaxRunTimeDefault: 2h
+ MaxRunTimeDefault: 0
+
JobsAPI:
# Enable the legacy 'jobs' API (crunch v1). This value must be a string.
#
@@ -1277,6 +1504,51 @@ Clusters:
# providers too, if desired.
MaxConcurrentInstanceCreateOps: 1
+ # The maximum number of instances to run at a time, or 0 for
+ # unlimited.
+ #
+ # If more instances than this are already running and busy
+ # when the dispatcher starts up, the running containers will
+ # be allowed to finish before the excess instances are shut
+ # down.
+ MaxInstances: 64
+
+ # The minimum number of instances expected to be runnable
+ # without reaching a provider-imposed quota.
+ #
+ # This is used as the initial value for the dispatcher's
+ # dynamic instance limit, which increases (up to MaxInstances)
+ # as containers start up successfully and decreases in
+ # response to high API load and cloud quota errors.
+ #
+ # Setting this to 0 means the dynamic instance limit will
+ # start at MaxInstances.
+ #
+ # Situations where you may want to set this (to a value less
+ # than MaxInstances) would be when there is significant
+ # variability or uncertainty in the actual cloud resources
+ # available. Upon reaching InitialQuotaEstimate the
+ # dispatcher will switch to a more conservative behavior with
+ # slower instance start to avoid over-shooting cloud resource
+ # limits.
+ InitialQuotaEstimate: 0
+
+ # Maximum fraction of available instance capacity allowed to
+ # run "supervisor" containers at any given time. A supervisor
+ # is a container whose purpose is mainly to submit and manage
+      # other containers, such as the arvados-cwl-runner workflow
+      # runner.
+ #
+ # If there is a hard limit on the amount of concurrent
+ # containers that the cluster can run, it is important to
+ # avoid crowding out the containers doing useful work with
+      # containers that just create more work.
+ #
+ # For example, with the default MaxInstances of 64, it will
+ # schedule at most floor(64*0.50) = 32 concurrent workflow
+ # runners, ensuring 32 slots are available for work.
+ SupervisorFraction: 0.50
+
# Interval between cloud provider syncs/updates ("list all
# instances").
SyncInterval: 1m
@@ -1307,16 +1579,28 @@ Clusters:
# https://xxxxx.blob.core.windows.net/system/Microsoft.Compute/Images/images/xxxxx.vhd
ImageID: ""
+ # Shell script to run on new instances using the cloud
+ # provider's UserData (EC2) or CustomData (Azure) feature.
+ #
+ # It is not necessary to include a #!/bin/sh line.
+ InstanceInitCommand: ""
+
# An executable file (located on the dispatcher host) to be
# copied to cloud instances at runtime and used as the
# container runner/supervisor. The default value is the
# dispatcher program itself.
#
- # Use the empty string to disable this step: nothing will be
+ # Use an empty string to disable this step: nothing will be
# copied, and cloud instances are assumed to have a suitable
# version of crunch-run installed; see CrunchRunCommand above.
DeployRunnerBinary: "/proc/self/exe"
+ # Install the Dispatcher's SSH public key (derived from
+ # DispatchPrivateKey) when creating new cloud
+ # instances. Change this to false if you are using a different
+ # mechanism to pre-install the public key on new instances.
+ DeployPublicKey: true
+
# Tags to add on all resources (VMs, NICs, disks) created by
# the container dispatcher. (Arvados's own tags --
# InstanceType, IdleBehavior, and InstanceSecret -- will also
@@ -1348,16 +1632,43 @@ Clusters:
SecretAccessKey: ""
# (ec2) Instance configuration.
+
+ # (ec2) Region, like "us-east-1".
+ Region: ""
+
+ # (ec2) Security group IDs. Omit or use {} to use the
+ # default security group.
SecurityGroupIDs:
"SAMPLE": {}
+
+ # (ec2) One or more subnet IDs. Omit or leave empty to let
+ # AWS choose a default subnet from your default VPC. If
+ # multiple subnets are configured here (enclosed in brackets
+ # like [subnet-abc123, subnet-def456]) the cloud dispatcher
+ # will detect subnet-related errors and retry using a
+ # different subnet. Most sites specify one subnet.
SubnetID: ""
- Region: ""
+
EBSVolumeType: gp2
AdminUsername: debian
# (ec2) name of the IAMInstanceProfile for instances started by
# the cloud dispatcher. Leave blank when not needed.
IAMInstanceProfile: ""
+ # (ec2) how often to look up spot instance pricing data
+ # (only while running spot instances) for the purpose of
+ # calculating container cost estimates. A value of 0
+ # disables spot price lookups entirely.
+ SpotPriceUpdateInterval: 24h
+
+ # (ec2) per-GiB-month cost of EBS volumes. Matches
+ # EBSVolumeType. Used to account for AddedScratch when
+ # calculating container cost estimates. Note that
+ # https://aws.amazon.com/ebs/pricing/ defines GB to mean
+ # GiB, so an advertised price $0.10/GB indicates a real
+ # price of $0.10/GiB and can be entered here as 0.10.
+ EBSPrice: 0.10
+
# (azure) Credentials.
SubscriptionID: ""
ClientID: ""
@@ -1411,6 +1722,13 @@ Clusters:
RAM: 128MiB
IncludedScratch: 16GB
AddedScratch: 0
+ # Hourly price ($), used to select node types for containers,
+ # and to calculate estimated container costs. For spot
+ # instances on EC2, this is also used as the maximum price
+ # when launching spot instances, while the estimated container
+ # cost is computed based on the current spot price according
+ # to AWS. On Azure, and on-demand instances on EC2, the price
+ # given here is used to compute container cost estimates.
Price: 0.1
Preemptible: false
# Include this section if the node type includes GPU (CUDA) support
@@ -1463,6 +1781,11 @@ Clusters:
ReadOnly: false
"http://host1.example:25107": {}
ReadOnly: false
+ # AllowTrashWhenReadOnly enables unused and overreplicated
+ # blocks to be trashed/deleted even when ReadOnly is
+ # true. Normally, this is false and ReadOnly prevents all
+ # trash/delete operations as well as writes.
+ AllowTrashWhenReadOnly: false
Replication: 1
StorageClasses:
# If you have configured storage classes (see StorageClasses
@@ -1486,8 +1809,6 @@ Clusters:
ReadTimeout: 10m
RaceWindow: 24h
PrefixLength: 0
- # Use aws-s3-go (v2) instead of goamz
- UseAWSS3v2Driver: false
# For S3 driver, potentially unsafe tuning parameter,
# intentionally excluded from main documentation.
@@ -1533,8 +1854,18 @@ Clusters:
Serialize: false
Mail:
- MailchimpAPIKey: ""
- MailchimpListID: ""
+ # In order to send mail, Arvados expects a default SMTP server
+ # on localhost:25. It cannot require authentication on
+ # connections from localhost. That server should be configured
+ # to relay mail to a "real" SMTP server that is able to send
+ # email on behalf of your domain.
+
+ # See also the "Users" configuration section for additional
+ # email-related options.
+
+ # When a user has been set up (meaning they are able to log in)
+ # they will receive an email using the template specified
+ # earlier in Users.UserSetupMailText
SendUserSetupNotificationEmail: true
# Bug/issue report notification to and from addresses
@@ -1544,6 +1875,10 @@ Clusters:
# Generic issue email from
EmailFrom: "arvados@example.com"
+
+ # No longer supported, to be removed.
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
RemoteClusters:
"*":
Host: ""
@@ -1577,18 +1912,12 @@ Clusters:
ArvadosDocsite: https://doc.arvados.org
ArvadosPublicDataDocURL: https://playground.arvados.org/projects/public
ShowUserAgreementInline: false
- SecretKeyBase: ""
# Set this configuration to true to avoid providing an easy way for users
# to share data with unauthenticated users; this may be necessary on
# installations where strict data access controls are needed.
DisableSharingURLsUI: false
- # Scratch directory used by the remote repository browsing
- # feature. If it doesn't exist, it (and any missing parents) will be
- # created using mkdir_p.
- RepositoryCache: /var/www/arvados-workbench/current/tmp/git
-
# Below is a sample setting of user_profile_form_fields config parameter.
# This configuration parameter should be set to either false (to disable) or
# to a map as shown below.
@@ -1635,71 +1964,7 @@ Clusters:
# to display on the profile page.
UserProfileFormMessage: 'Welcome to Arvados. All required fields must be completed before you can proceed.'
- # Mimetypes of applications for which the view icon
- # would be enabled in a collection's show page.
- # It is sufficient to list only applications here.
- # No need to list text and image types.
- ApplicationMimetypesWithViewIcon:
- cwl: {}
- fasta: {}
- go: {}
- javascript: {}
- json: {}
- pdf: {}
- python: {}
- x-python: {}
- r: {}
- rtf: {}
- sam: {}
- x-sh: {}
- vnd.realvnc.bed: {}
- xml: {}
- xsl: {}
- SAMPLE: {}
-
- # The maximum number of bytes to load in the log viewer
- LogViewerMaxBytes: 1M
-
- # When anonymous_user_token is configured, show public projects page
- EnablePublicProjectsPage: true
-
- # By default, disable the "Getting Started" popup which is specific to Arvados playground
- EnableGettingStartedPopup: false
-
- # Ask Arvados API server to compress its response payloads.
- APIResponseCompression: true
-
- # Timeouts for API requests.
- APIClientConnectTimeout: 2m
- APIClientReceiveTimeout: 5m
-
- # Maximum number of historic log records of a running job to fetch
- # and display in the Log tab, while subscribing to web sockets.
- RunningJobLogRecordsToFetch: 2000
-
- # In systems with many shared projects, loading of dashboard and topnav
- # can be slow due to collections indexing; use the following parameters
- # to suppress these properties
- ShowRecentCollectionsOnDashboard: true
- ShowUserNotifications: true
-
- # Enable/disable "multi-site search" in top nav ("true"/"false"), or
- # a link to the multi-site search page on a "home" Workbench site.
- #
- # Example:
- # https://workbench.zzzzz.arvadosapi.com/collections/multisite
- MultiSiteSearch: ""
-
- # Should workbench allow management of local git repositories? Set to false if
- # the jobs api is disabled and there are no local git repositories.
- Repositories: true
-
SiteName: Arvados Workbench
- ProfilingEnabled: false
-
- # This is related to obsolete Google OpenID 1.0 login
- # but some workbench stuff still expects it to be set.
- DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
# Workbench2 configs
FileViewersConfigURL: ""
@@ -1708,6 +1973,12 @@ Clusters:
# This feature is disabled when set to zero.
IdleTimeout: 0s
+ # UUID of a collection. This collection should be shared with
+ # all users. Workbench will look for a file "banner.html" in
+ # this collection and display its contents (should be
+ # HTML-formatted text) when users first log in to Workbench.
+ BannerUUID: ""
+
# Workbench welcome screen, this is HTML text that will be
# incorporated directly onto the page.
WelcomePageHTML: |
diff --git a/lib/config/deprecated.go b/lib/config/deprecated.go
index c0a7921b36..d518b3414a 100644
--- a/lib/config/deprecated.go
+++ b/lib/config/deprecated.go
@@ -494,17 +494,8 @@ func (ldr *Loader) loadOldKeepWebConfig(cfg *arvados.Config) error {
if oc.Cache.TTL != nil {
cluster.Collections.WebDAVCache.TTL = *oc.Cache.TTL
}
- if oc.Cache.UUIDTTL != nil {
- cluster.Collections.WebDAVCache.UUIDTTL = *oc.Cache.UUIDTTL
- }
- if oc.Cache.MaxCollectionEntries != nil {
- cluster.Collections.WebDAVCache.MaxCollectionEntries = *oc.Cache.MaxCollectionEntries
- }
if oc.Cache.MaxCollectionBytes != nil {
- cluster.Collections.WebDAVCache.MaxCollectionBytes = *oc.Cache.MaxCollectionBytes
- }
- if oc.Cache.MaxUUIDEntries != nil {
- cluster.Collections.WebDAVCache.MaxUUIDEntries = *oc.Cache.MaxUUIDEntries
+ cluster.Collections.WebDAVCache.MaxCollectionBytes = arvados.ByteSize(*oc.Cache.MaxCollectionBytes)
}
if oc.AnonymousTokens != nil {
if len(*oc.AnonymousTokens) > 0 {
diff --git a/lib/config/deprecated_test.go b/lib/config/deprecated_test.go
index 4206ef5771..e06a1f231d 100644
--- a/lib/config/deprecated_test.go
+++ b/lib/config/deprecated_test.go
@@ -199,10 +199,7 @@ func (s *LoadSuite) TestLegacyKeepWebConfig(c *check.C) {
c.Check(cluster.SystemRootToken, check.Equals, "abcdefg")
c.Check(cluster.Collections.WebDAVCache.TTL, check.Equals, arvados.Duration(60*time.Second))
- c.Check(cluster.Collections.WebDAVCache.UUIDTTL, check.Equals, arvados.Duration(time.Second))
- c.Check(cluster.Collections.WebDAVCache.MaxCollectionEntries, check.Equals, 42)
- c.Check(cluster.Collections.WebDAVCache.MaxCollectionBytes, check.Equals, int64(1234567890))
- c.Check(cluster.Collections.WebDAVCache.MaxUUIDEntries, check.Equals, 100)
+ c.Check(cluster.Collections.WebDAVCache.MaxCollectionBytes, check.Equals, arvados.ByteSize(1234567890))
c.Check(cluster.Services.WebDAVDownload.ExternalURL, check.Equals, arvados.URL{Host: "download.example.com", Path: "/"})
c.Check(cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: ":80"}], check.NotNil)
diff --git a/lib/config/export.go b/lib/config/export.go
index a55295d126..f511ebbcb1 100644
--- a/lib/config/export.go
+++ b/lib/config/export.go
@@ -37,8 +37,8 @@ func ExportJSON(w io.Writer, cluster *arvados.Cluster) error {
return json.NewEncoder(w).Encode(m)
}
-// whitelist classifies configs as safe/unsafe to reveal to
-// unauthenticated clients.
+// whitelist classifies configs as safe/unsafe to reveal through the API
+// endpoint. Note that endpoint does not require authentication.
//
// Every config entry must either be listed explicitly here along with
// all of its parent keys (e.g., "API" + "API.RequestTimeout"), or
@@ -66,10 +66,16 @@ var whitelist = map[string]bool{
"API.FreezeProjectRequiresProperties": true,
"API.FreezeProjectRequiresProperties.*": true,
"API.KeepServiceRequestTimeout": false,
+ "API.LockBeforeUpdate": false,
+ "API.LogCreateRequestFraction": false,
+ "API.MaxConcurrentRailsRequests": false,
"API.MaxConcurrentRequests": false,
+ "API.MaxGatewayTunnels": false,
"API.MaxIndexDatabaseRead": false,
"API.MaxItemsPerResponse": true,
"API.MaxKeepBlobBuffers": false,
+ "API.MaxQueuedRequests": false,
+ "API.MaxQueueTimeForLockRequests": false,
"API.MaxRequestAmplification": false,
"API.MaxRequestSize": true,
"API.MaxTokenLifetime": false,
@@ -88,7 +94,9 @@ var whitelist = map[string]bool{
"Collections.BalanceCollectionBatch": false,
"Collections.BalanceCollectionBuffers": false,
"Collections.BalancePeriod": false,
+ "Collections.BalancePullLimit": false,
"Collections.BalanceTimeout": false,
+ "Collections.BalanceTrashLimit": false,
"Collections.BalanceUpdateLimit": false,
"Collections.BlobDeleteConcurrency": false,
"Collections.BlobMissingReport": false,
@@ -114,6 +122,7 @@ var whitelist = map[string]bool{
"Collections.TrustAllContent": true,
"Collections.WebDAVCache": false,
"Collections.WebDAVLogEvents": false,
+ "Collections.WebDAVOutputBuffer": false,
"Collections.WebDAVPermission": false,
"Containers": true,
"Containers.AlwaysUsePreemptibleInstances": true,
@@ -130,8 +139,8 @@ var whitelist = map[string]bool{
"Containers.Logging": false,
"Containers.LogReuseDecisions": false,
"Containers.LSF": false,
- "Containers.MaxComputeVMs": false,
"Containers.MaxDispatchAttempts": false,
+ "Containers.MaximumPriceFactor": true,
"Containers.MaxRetryAttempts": true,
"Containers.MinRetryPeriod": true,
"Containers.PreemptiblePriceFactor": false,
@@ -162,6 +171,7 @@ var whitelist = map[string]bool{
"Login.LDAP.EmailAttribute": false,
"Login.LDAP.Enable": true,
"Login.LDAP.InsecureTLS": false,
+ "Login.LDAP.MinTLSVersion": false,
"Login.LDAP.SearchAttribute": false,
"Login.LDAP.SearchBase": false,
"Login.LDAP.SearchBindPassword": false,
@@ -193,6 +203,7 @@ var whitelist = map[string]bool{
"Login.Test.Users": false,
"Login.TokenLifetime": false,
"Login.TrustedClients": false,
+ "Login.TrustPrivateNetworks": false,
"Mail": true,
"Mail.EmailFrom": false,
"Mail.IssueReporterEmailFrom": false,
@@ -226,6 +237,7 @@ var whitelist = map[string]bool{
"TLS.Key": false,
"Users": true,
"Users.ActivatedUsersAreVisibleToOthers": false,
+ "Users.ActivityLoggingPeriod": false,
"Users.AdminNotifierEmailFrom": false,
"Users.AnonymousUserToken": true,
"Users.AutoAdminFirstUser": false,
@@ -234,12 +246,19 @@ var whitelist = map[string]bool{
"Users.AutoSetupNewUsersWithRepository": false,
"Users.AutoSetupNewUsersWithVmUUID": false,
"Users.AutoSetupUsernameBlacklist": false,
+ "Users.CanCreateRoleGroups": true,
"Users.EmailSubjectPrefix": false,
"Users.NewInactiveUserNotificationRecipients": false,
"Users.NewUserNotificationRecipients": false,
"Users.NewUsersAreActive": false,
"Users.PreferDomainForUsername": false,
"Users.RoleGroupsVisibleToAll": false,
+ "Users.SyncIgnoredGroups": true,
+ "Users.SyncRequiredGroups": true,
+ "Users.SyncUserAccounts": true,
+ "Users.SyncUserAPITokens": true,
+ "Users.SyncUserGroups": true,
+ "Users.SyncUserSSHKeys": true,
"Users.UserNotifierEmailBcc": false,
"Users.UserNotifierEmailFrom": false,
"Users.UserProfileNotificationAddress": false,
@@ -263,6 +282,7 @@ var whitelist = map[string]bool{
"Workbench.ApplicationMimetypesWithViewIcon.*": true,
"Workbench.ArvadosDocsite": true,
"Workbench.ArvadosPublicDataDocURL": true,
+ "Workbench.BannerUUID": true,
"Workbench.DefaultOpenIdPrefix": false,
"Workbench.DisableSharingURLsUI": true,
"Workbench.EnableGettingStartedPopup": true,
@@ -276,7 +296,6 @@ var whitelist = map[string]bool{
"Workbench.Repositories": false,
"Workbench.RepositoryCache": false,
"Workbench.RunningJobLogRecordsToFetch": true,
- "Workbench.SecretKeyBase": false,
"Workbench.ShowRecentCollectionsOnDashboard": true,
"Workbench.ShowUserAgreementInline": true,
"Workbench.ShowUserNotifications": true,
diff --git a/lib/config/load.go b/lib/config/load.go
index fbd01488a0..d504f7796c 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -26,6 +26,7 @@ import (
"github.com/imdario/mergo"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
"golang.org/x/sys/unix"
)
@@ -448,6 +449,7 @@ func (ldr *Loader) setLoopbackInstanceType(cfg *arvados.Config) error {
RAM: hostram,
Scratch: scratch,
IncludedScratch: scratch,
+ Price: 1.0,
}}
cfg.Clusters[id] = cc
}
@@ -689,3 +691,17 @@ func (ldr *Loader) RegisterMetrics(reg *prometheus.Registry) {
vec.WithLabelValues(hash).Set(float64(ldr.loadTimestamp.UnixNano()) / 1e9)
reg.MustRegister(vec)
}
+
+// LoadSSHKey loads an SSH private key from confvalue, which is either
+// the literal key data, or a "file://" URL with an absolute path to a file containing the key.
+func LoadSSHKey(confvalue string) (ssh.Signer, error) {
+ if fnm := strings.TrimPrefix(confvalue, "file://"); fnm != confvalue && strings.HasPrefix(fnm, "/") {
+ keydata, err := os.ReadFile(fnm)
+ if err != nil {
+ return nil, err
+ }
+ return ssh.ParsePrivateKey(keydata)
+ } else {
+ return ssh.ParsePrivateKey([]byte(confvalue))
+ }
+}
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
index a19400c191..75efc6a35a 100644
--- a/lib/config/load_test.go
+++ b/lib/config/load_test.go
@@ -19,10 +19,10 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/ghodss/yaml"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/expfmt"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
check "gopkg.in/check.v1"
@@ -882,15 +882,10 @@ func (s *LoadSuite) TestSourceTimestamp(c *check.C) {
c.Check(int(cfg.SourceTimestamp.Sub(trial.expectTime).Seconds()), check.Equals, 0)
c.Check(int(ldr.loadTimestamp.Sub(time.Now()).Seconds()), check.Equals, 0)
- var buf bytes.Buffer
reg := prometheus.NewRegistry()
ldr.RegisterMetrics(reg)
- enc := expfmt.NewEncoder(&buf, expfmt.FmtText)
- got, _ := reg.Gather()
- for _, mf := range got {
- enc.Encode(mf)
- }
- c.Check(buf.String(), check.Matches, `# HELP .*
+ metrics := arvadostest.GatherMetricsAsString(reg)
+ c.Check(metrics, check.Matches, `# HELP .*
# TYPE .*
arvados_config_load_timestamp_seconds{sha256="83aea5d82eb1d53372cd65c936c60acc1c6ef946e61977bbca7cfea709d201a8"} \Q`+fmt.Sprintf("%g", float64(ldr.loadTimestamp.UnixNano())/1e9)+`\E
# HELP .*
@@ -912,3 +907,10 @@ func (s *LoadSuite) TestGetFilesystemSize(c *check.C) {
c.Check(err, check.IsNil)
c.Logf("getFilesystemSize(%q) == %v", path, size)
}
+
+func (s *LoadSuite) TestLoadSSHKey(c *check.C) {
+ cwd, err := os.Getwd()
+ c.Assert(err, check.IsNil)
+ _, err = LoadSSHKey("file://" + cwd + "/../dispatchcloud/test/sshkey_dispatch")
+ c.Check(err, check.IsNil)
+}
diff --git a/lib/controller/dblock/dblock.go b/lib/controller/dblock/dblock.go
index 1a36822d5b..c59bcef0b2 100644
--- a/lib/controller/dblock/dblock.go
+++ b/lib/controller/dblock/dblock.go
@@ -7,6 +7,8 @@ package dblock
import (
"context"
"database/sql"
+ "fmt"
+ "net"
"sync"
"time"
@@ -15,8 +17,13 @@ import (
)
var (
- TrashSweep = &DBLocker{key: 10001}
- retryDelay = 5 * time.Second
+ TrashSweep = &DBLocker{key: 10001}
+ ContainerLogSweep = &DBLocker{key: 10002}
+ KeepBalanceService = &DBLocker{key: 10003} // keep-balance service in periodic-sweep loop
+ KeepBalanceActive = &DBLocker{key: 10004} // keep-balance sweep in progress (either -once=true or service loop)
+ Dispatch = &DBLocker{key: 10005} // any dispatcher running
+ RailsMigrations = &DBLocker{key: 10006}
+ retryDelay = 5 * time.Second
)
// DBLocker uses pg_advisory_lock to maintain a cluster-wide lock for
@@ -30,8 +37,11 @@ type DBLocker struct {
}
// Lock acquires the advisory lock, waiting/reconnecting if needed.
-func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) {
- logger := ctxlog.FromContext(ctx)
+//
+// Returns false if ctx is canceled before the lock is acquired.
+func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) bool {
+ logger := ctxlog.FromContext(ctx).WithField("ID", dbl.key)
+ var lastHeldBy string
for ; ; time.Sleep(retryDelay) {
dbl.mtx.Lock()
if dbl.conn != nil {
@@ -40,55 +50,87 @@ func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sql
dbl.mtx.Unlock()
continue
}
+ if ctx.Err() != nil {
+ dbl.mtx.Unlock()
+ return false
+ }
db, err := getdb(ctx)
- if err != nil {
- logger.WithError(err).Infof("error getting database pool")
+ if err == context.Canceled {
+ dbl.mtx.Unlock()
+ return false
+ } else if err != nil {
+ logger.WithError(err).Info("error getting database pool")
dbl.mtx.Unlock()
continue
}
conn, err := db.Conn(ctx)
- if err != nil {
+ if err == context.Canceled {
+ dbl.mtx.Unlock()
+ return false
+ } else if err != nil {
logger.WithError(err).Info("error getting database connection")
dbl.mtx.Unlock()
continue
}
var locked bool
err = conn.QueryRowContext(ctx, `SELECT pg_try_advisory_lock($1)`, dbl.key).Scan(&locked)
- if err != nil {
- logger.WithError(err).Infof("error getting pg_try_advisory_lock %d", dbl.key)
+ if err == context.Canceled {
+ return false
+ } else if err != nil {
+ logger.WithError(err).Info("error getting pg_try_advisory_lock")
conn.Close()
dbl.mtx.Unlock()
continue
}
if !locked {
+ var host string
+ var port int
+ err = conn.QueryRowContext(ctx, `SELECT client_addr, client_port FROM pg_stat_activity WHERE pid IN
+ (SELECT pid FROM pg_locks
+ WHERE locktype = $1 AND objid = $2)`, "advisory", dbl.key).Scan(&host, &port)
+ if err != nil {
+ logger.WithError(err).Info("error getting other client info")
+ } else {
+ heldBy := net.JoinHostPort(host, fmt.Sprintf("%d", port))
+ if lastHeldBy != heldBy {
+ logger.WithField("DBClient", heldBy).Info("waiting for other process to release lock")
+ lastHeldBy = heldBy
+ }
+ }
conn.Close()
dbl.mtx.Unlock()
continue
}
- logger.Debugf("acquired pg_advisory_lock %d", dbl.key)
+ logger.Debug("acquired pg_advisory_lock")
dbl.ctx, dbl.getdb, dbl.conn = ctx, getdb, conn
dbl.mtx.Unlock()
- return
+ return true
}
}
// Check confirms that the lock is still active (i.e., the session is
// still alive), and re-acquires if needed. Panics if Lock is not
// acquired first.
-func (dbl *DBLocker) Check() {
+//
+// Returns false if the context passed to Lock() is canceled before
+// the lock is confirmed or reacquired.
+func (dbl *DBLocker) Check() bool {
dbl.mtx.Lock()
err := dbl.conn.PingContext(dbl.ctx)
- if err == nil {
- ctxlog.FromContext(dbl.ctx).Debugf("pg_advisory_lock %d connection still alive", dbl.key)
+ if err == context.Canceled {
+ dbl.mtx.Unlock()
+ return false
+ } else if err == nil {
+ ctxlog.FromContext(dbl.ctx).WithField("ID", dbl.key).Debug("connection still alive")
dbl.mtx.Unlock()
- return
+ return true
}
ctxlog.FromContext(dbl.ctx).WithError(err).Info("database connection ping failed")
dbl.conn.Close()
dbl.conn = nil
ctx, getdb := dbl.ctx, dbl.getdb
dbl.mtx.Unlock()
- dbl.Lock(ctx, getdb)
+ return dbl.Lock(ctx, getdb)
}
func (dbl *DBLocker) Unlock() {
@@ -97,9 +139,9 @@ func (dbl *DBLocker) Unlock() {
if dbl.conn != nil {
_, err := dbl.conn.ExecContext(context.Background(), `SELECT pg_advisory_unlock($1)`, dbl.key)
if err != nil {
- ctxlog.FromContext(dbl.ctx).WithError(err).Infof("error releasing pg_advisory_lock %d", dbl.key)
+ ctxlog.FromContext(dbl.ctx).WithError(err).WithField("ID", dbl.key).Info("error releasing pg_advisory_lock")
} else {
- ctxlog.FromContext(dbl.ctx).Debugf("released pg_advisory_lock %d", dbl.key)
+ ctxlog.FromContext(dbl.ctx).WithField("ID", dbl.key).Debug("released pg_advisory_lock")
}
dbl.conn.Close()
dbl.conn = nil
diff --git a/lib/controller/dblock/dblock_test.go b/lib/controller/dblock/dblock_test.go
new file mode 100644
index 0000000000..b10b2a3acd
--- /dev/null
+++ b/lib/controller/dblock/dblock_test.go
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dblock
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/jmoiron/sqlx"
+ "github.com/sirupsen/logrus"
+ check "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&suite{})
+
+type suite struct {
+ cluster *arvados.Cluster
+ db *sqlx.DB
+ getdb func(context.Context) (*sqlx.DB, error)
+}
+
+var testLocker = &DBLocker{key: 999}
+
+func (s *suite) SetUpSuite(c *check.C) {
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ s.cluster, err = cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ s.db = arvadostest.DB(c, s.cluster)
+ s.getdb = func(context.Context) (*sqlx.DB, error) { return s.db, nil }
+}
+
+func (s *suite) TestLock(c *check.C) {
+ retryDelay = 10 * time.Millisecond
+
+ var logbuf bytes.Buffer
+ logger := ctxlog.New(&logbuf, "text", "debug")
+ logger.Level = logrus.DebugLevel
+ ctx := ctxlog.Context(context.Background(), logger)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ testLocker.Lock(ctx, s.getdb)
+ testLocker.Check()
+
+ lock2 := make(chan bool)
+ var wg sync.WaitGroup
+ defer wg.Wait()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ testLocker2 := &DBLocker{key: 999}
+ testLocker2.Lock(ctx, s.getdb)
+ close(lock2)
+ testLocker2.Check()
+ testLocker2.Unlock()
+ }()
+
+ // Second lock should wait for first to Unlock
+ select {
+ case <-time.After(time.Second / 10):
+ c.Check(logbuf.String(), check.Matches, `(?ms).*level=info.*DBClient="[^"]+:\d+".*ID=999.*`)
+ case <-lock2:
+ c.Log("double-lock")
+ c.Fail()
+ }
+
+ testLocker.Check()
+ testLocker.Unlock()
+
+ // Now the second lock should succeed within retryDelay
+ select {
+ case <-time.After(retryDelay * 2):
+ c.Log("timed out")
+ c.Fail()
+ case <-lock2:
+ }
+ c.Logf("%s", logbuf.String())
+}
diff --git a/lib/controller/federation.go b/lib/controller/federation.go
index e7d6e29b88..93b8315a63 100644
--- a/lib/controller/federation.go
+++ b/lib/controller/federation.go
@@ -142,7 +142,7 @@ type CurrentUser struct {
// non-nil, true, nil -- if the token is valid
func (h *Handler) validateAPItoken(req *http.Request, token string) (*CurrentUser, bool, error) {
user := CurrentUser{Authorization: arvados.APIClientAuthorization{APIToken: token}}
- db, err := h.db(req.Context())
+ db, err := h.dbConnector.GetDB(req.Context())
if err != nil {
ctxlog.FromContext(req.Context()).WithError(err).Debugf("validateAPItoken(%s): database error", token)
return nil, false, err
@@ -179,7 +179,7 @@ func (h *Handler) validateAPItoken(req *http.Request, token string) (*CurrentUse
}
func (h *Handler) createAPItoken(req *http.Request, userUUID string, scopes []string) (*arvados.APIClientAuthorization, error) {
- db, err := h.db(req.Context())
+ db, err := h.dbConnector.GetDB(req.Context())
if err != nil {
return nil, err
}
diff --git a/lib/controller/federation/collection_test.go b/lib/controller/federation/collection_test.go
new file mode 100644
index 0000000000..8256819efb
--- /dev/null
+++ b/lib/controller/federation/collection_test.go
@@ -0,0 +1,106 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package federation
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&collectionSuite{})
+
+type collectionSuite struct {
+ FederationSuite
+}
+
+func (s *collectionSuite) TestMultipleBackendFailureStatus(c *check.C) {
+ nxPDH := "a4f995dd0c08216f37cb1bdec990f0cd+1234"
+ s.cluster.ClusterID = "local"
+ for _, trial := range []struct {
+ label string
+ token string
+ localStatus int
+ remoteStatus map[string]int
+ expectStatus int
+ }{
+ {
+ "all backends return 404 => 404",
+ arvadostest.SystemRootToken,
+ http.StatusNotFound,
+ map[string]int{
+ "aaaaa": http.StatusNotFound,
+ "bbbbb": http.StatusNotFound,
+ },
+ http.StatusNotFound,
+ },
+ {
+ "all backends return 401 => 401 (e.g., bad token)",
+ arvadostest.SystemRootToken,
+ http.StatusUnauthorized,
+ map[string]int{
+ "aaaaa": http.StatusUnauthorized,
+ "bbbbb": http.StatusUnauthorized,
+ },
+ http.StatusUnauthorized,
+ },
+ {
+ "local 404, remotes 403 => 422 (mix of non-retryable errors)",
+ arvadostest.SystemRootToken,
+ http.StatusNotFound,
+ map[string]int{
+ "aaaaa": http.StatusForbidden,
+ "bbbbb": http.StatusForbidden,
+ },
+ http.StatusUnprocessableEntity,
+ },
+ {
+ "local 404, remotes 401/403/404 => 422 (mix of non-retryable errors)",
+ arvadostest.SystemRootToken,
+ http.StatusNotFound,
+ map[string]int{
+ "aaaaa": http.StatusUnauthorized,
+ "bbbbb": http.StatusForbidden,
+ "ccccc": http.StatusNotFound,
+ },
+ http.StatusUnprocessableEntity,
+ },
+ {
+ "local 404, remotes 401/403/500 => 502 (at least one remote is retryable)",
+ arvadostest.SystemRootToken,
+ http.StatusNotFound,
+ map[string]int{
+ "aaaaa": http.StatusUnauthorized,
+ "bbbbb": http.StatusForbidden,
+ "ccccc": http.StatusInternalServerError,
+ },
+ http.StatusBadGateway,
+ },
+ } {
+ c.Logf("trial: %v", trial)
+ s.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
+ s.fed.local = &arvadostest.APIStub{Error: httpserver.ErrorWithStatus(fmt.Errorf("stub error %d", trial.localStatus), trial.localStatus)}
+ for id, status := range trial.remoteStatus {
+ s.addDirectRemote(c, id, &arvadostest.APIStub{Error: httpserver.ErrorWithStatus(fmt.Errorf("stub error %d", status), status)})
+ }
+
+ ctx := context.Background()
+ ctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))
+ if trial.token != "" {
+ ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{trial.token}})
+ }
+
+ _, err := s.fed.CollectionGet(s.ctx, arvados.GetOptions{UUID: nxPDH})
+ c.Check(err.(httpserver.HTTPStatusError).HTTPStatus(), check.Equals, trial.expectStatus)
+ }
+}
diff --git a/lib/controller/federation/conn.go b/lib/controller/federation/conn.go
index ffb150bf26..949cc56dd2 100644
--- a/lib/controller/federation/conn.go
+++ b/lib/controller/federation/conn.go
@@ -14,6 +14,7 @@ import (
"net/url"
"regexp"
"strings"
+ "sync"
"time"
"git.arvados.org/arvados.git/lib/config"
@@ -23,16 +24,18 @@ import (
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
+ "github.com/jmoiron/sqlx"
)
type Conn struct {
+ bgCtx context.Context
cluster *arvados.Cluster
local backend
remotes map[string]backend
}
-func New(cluster *arvados.Cluster, healthFuncs *map[string]health.Func) *Conn {
- local := localdb.NewConn(cluster)
+func New(bgCtx context.Context, cluster *arvados.Cluster, healthFuncs *map[string]health.Func, getdb func(context.Context) (*sqlx.DB, error)) *Conn {
+ local := localdb.NewConn(bgCtx, cluster, getdb)
remotes := map[string]backend{}
for id, remote := range cluster.RemoteClusters {
if !remote.Proxy || id == cluster.ClusterID {
@@ -51,6 +54,7 @@ func New(cluster *arvados.Cluster, healthFuncs *map[string]health.Func) *Conn {
}
return &Conn{
+ bgCtx: bgCtx,
cluster: cluster,
local: local,
remotes: remotes,
@@ -175,20 +179,29 @@ func (conn *Conn) tryLocalThenRemotes(ctx context.Context, forwardedFor string,
errchan <- fn(ctx, remoteID, be)
}()
}
- all404 := true
+ returncode := http.StatusNotFound
var errs []error
for i := 0; i < cap(errchan); i++ {
err := <-errchan
if err == nil {
return nil
}
- all404 = all404 && errStatus(err) == http.StatusNotFound
errs = append(errs, err)
+ if code := errStatus(err); code >= 500 || code == http.StatusTooManyRequests {
+ // If any of the remotes have a retryable
+ // error (and none succeed) we'll return 502.
+ returncode = http.StatusBadGateway
+ } else if code != http.StatusNotFound && returncode != http.StatusBadGateway {
+ // If some of the remotes have non-retryable
+ // non-404 errors (and none succeed or have
+ // retryable errors) we'll return 422.
+ returncode = http.StatusUnprocessableEntity
+ }
}
- if all404 {
+ if returncode == http.StatusNotFound {
return notFoundError{}
}
- return httpErrorf(http.StatusBadGateway, "errors: %v", errs)
+ return httpErrorf(returncode, "errors: %v", errs)
}
func (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
@@ -212,7 +225,11 @@ func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
}
func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
- return conn.chooseBackend(conn.cluster.ClusterID).VocabularyGet(ctx)
+ return conn.local.VocabularyGet(ctx)
+}
+
+func (conn *Conn) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {
+ return conn.local.DiscoveryDocument(ctx)
}
func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
@@ -241,30 +258,71 @@ func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arva
return conn.local.Login(ctx, options)
}
+var v2TokenRegexp = regexp.MustCompile(`^v2/[a-z0-9]{5}-gj3su-[a-z0-9]{15}/`)
+
func (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
- // If the logout request comes with an API token from a known
- // remote cluster, redirect to that cluster's logout handler
- // so it has an opportunity to clear sessions, expire tokens,
- // etc. Otherwise use the local endpoint.
- reqauth, ok := auth.FromContext(ctx)
- if !ok || len(reqauth.Tokens) == 0 || len(reqauth.Tokens[0]) < 8 || !strings.HasPrefix(reqauth.Tokens[0], "v2/") {
- return conn.local.Logout(ctx, options)
- }
- id := reqauth.Tokens[0][3:8]
- if id == conn.cluster.ClusterID {
- return conn.local.Logout(ctx, options)
- }
- remote, ok := conn.remotes[id]
- if !ok {
- return conn.local.Logout(ctx, options)
+ // If the token was issued by another cluster, we want to issue a logout
+ // request to the issuing instance to invalidate the token federation-wide.
+ // If this federation has a login cluster, that's always considered the
+ // issuing cluster.
+ // Otherwise, if this is a v2 token, use the UUID to find the issuing
+ // cluster.
+ // Note that remoteBE may still be conn.local even *after* one of these
+ // conditions is true.
+ var remoteBE backend = conn.local
+ if conn.cluster.Login.LoginCluster != "" {
+ remoteBE = conn.chooseBackend(conn.cluster.Login.LoginCluster)
+ } else {
+ reqauth, ok := auth.FromContext(ctx)
+ if ok && len(reqauth.Tokens) > 0 && v2TokenRegexp.MatchString(reqauth.Tokens[0]) {
+ remoteBE = conn.chooseBackend(reqauth.Tokens[0][3:8])
+ }
}
- baseURL := remote.BaseURL()
- target, err := baseURL.Parse(arvados.EndpointLogout.Path)
- if err != nil {
- return arvados.LogoutResponse{}, fmt.Errorf("internal error getting redirect target: %s", err)
+
+ // We always want to invalidate the token locally. Start that process.
+ var localResponse arvados.LogoutResponse
+ var localErr error
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ localResponse, localErr = conn.local.Logout(ctx, options)
+ wg.Done()
+ }()
+
+ // If the token was issued by another cluster, log out there too.
+ if remoteBE != conn.local {
+ response, err := remoteBE.Logout(ctx, options)
+ // If the issuing cluster returns a redirect or error, that's more
+ // important to return to the user than anything that happens locally.
+ if response.RedirectLocation != "" || err != nil {
+ return response, err
+ }
}
- target.RawQuery = url.Values{"return_to": {options.ReturnTo}}.Encode()
- return arvados.LogoutResponse{RedirectLocation: target.String()}, nil
+
+ // Either the local cluster is the issuing cluster, or the issuing cluster's
+ // response was uninteresting.
+ wg.Wait()
+ return localResponse, localErr
+}
+
+func (conn *Conn) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {
+ return conn.chooseBackend(options.ClusterID).AuthorizedKeyCreate(ctx, options)
+}
+
+func (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {
+ return conn.chooseBackend(options.UUID).AuthorizedKeyUpdate(ctx, options)
+}
+
+func (conn *Conn) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {
+ return conn.chooseBackend(options.UUID).AuthorizedKeyGet(ctx, options)
+}
+
+func (conn *Conn) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {
+ return conn.generated_AuthorizedKeyList(ctx, options)
+}
+
+func (conn *Conn) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {
+ return conn.chooseBackend(options.UUID).AuthorizedKeyDelete(ctx, options)
}
func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
@@ -276,6 +334,9 @@ func (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions)
}
return c, err
}
+ if len(options.UUID) < 34 || options.UUID[32] != '+' {
+ return arvados.Collection{}, httpErrorf(http.StatusNotFound, "invalid UUID or PDH %q", options.UUID)
+ }
// UUID is a PDH
first := make(chan arvados.Collection, 1)
err := conn.tryLocalThenRemotes(ctx, options.ForwardedFor, func(ctx context.Context, remoteID string, be backend) error {
@@ -359,6 +420,10 @@ func (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOpt
return conn.chooseBackend(options.UUID).ContainerUpdate(ctx, options)
}
+func (conn *Conn) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ return conn.chooseBackend(options.UUID).ContainerPriorityUpdate(ctx, options)
+}
+
func (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
return conn.chooseBackend(options.UUID).ContainerGet(ctx, options)
}
@@ -445,6 +510,14 @@ func (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.De
return conn.chooseBackend(options.UUID).ContainerRequestDelete(ctx, options)
}
+func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {
+ return conn.chooseBackend(options.UUID).ContainerRequestContainerStatus(ctx, options)
+}
+
+func (conn *Conn) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (http.Handler, error) {
+ return conn.chooseBackend(options.UUID).ContainerRequestLog(ctx, options)
+}
+
func (conn *Conn) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {
return conn.chooseBackend(options.ClusterID).GroupCreate(ctx, options)
}
@@ -512,6 +585,26 @@ func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions)
return conn.chooseBackend(options.UUID).LinkDelete(ctx, options)
}
+func (conn *Conn) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {
+ return conn.chooseBackend(options.ClusterID).LogCreate(ctx, options)
+}
+
+func (conn *Conn) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {
+ return conn.chooseBackend(options.UUID).LogUpdate(ctx, options)
+}
+
+func (conn *Conn) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {
+ return conn.chooseBackend(options.UUID).LogGet(ctx, options)
+}
+
+func (conn *Conn) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {
+ return conn.generated_LogList(ctx, options)
+}
+
+func (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {
+ return conn.chooseBackend(options.UUID).LogDelete(ctx, options)
+}
+
func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
return conn.generated_SpecimenList(ctx, options)
}
@@ -542,6 +635,7 @@ var userAttrsCachedFromLoginCluster = map[string]bool{
"first_name": true,
"is_active": true,
"is_admin": true,
+ "is_invited": true,
"last_name": true,
"modified_at": true,
"prefs": true,
@@ -551,7 +645,6 @@ var userAttrsCachedFromLoginCluster = map[string]bool{
"etag": false,
"full_name": false,
"identity_url": false,
- "is_invited": false,
"modified_by_client_uuid": false,
"modified_by_user_uuid": false,
"owner_uuid": false,
@@ -563,7 +656,8 @@ var userAttrsCachedFromLoginCluster = map[string]bool{
func (conn *Conn) batchUpdateUsers(ctx context.Context,
options arvados.ListOptions,
- items []arvados.User) (err error) {
+ items []arvados.User,
+ includeAdminAndInvited bool) (err error) {
id := conn.cluster.Login.LoginCluster
logger := ctxlog.FromContext(ctx)
@@ -610,6 +704,11 @@ func (conn *Conn) batchUpdateUsers(ctx context.Context,
}
}
}
+ if !includeAdminAndInvited {
+ // make sure we don't send these fields.
+ delete(updates, "is_admin")
+ delete(updates, "is_invited")
+ }
batchOpts.Updates[user.UUID] = updates
}
if len(batchOpts.Updates) > 0 {
@@ -622,13 +721,47 @@ func (conn *Conn) batchUpdateUsers(ctx context.Context,
return nil
}
+func (conn *Conn) includeAdminAndInvitedInBatchUpdate(ctx context.Context, be backend, updateUserUUID string) (bool, error) {
+ // API versions prior to 20231117 would only include the
+ // is_invited and is_admin fields if the current user is an
+ // admin, or is requesting their own user record. If those
+ // fields aren't actually valid then we don't want to
+ // send them in the batch update.
+ dd, err := be.DiscoveryDocument(ctx)
+ if err != nil {
+ // couldn't get discovery document
+ return false, err
+ }
+ if dd.Revision >= "20231117" {
+ // newer version, fields are valid.
+ return true, nil
+ }
+ selfuser, err := be.UserGetCurrent(ctx, arvados.GetOptions{})
+ if err != nil {
+ // couldn't get our user record
+ return false, err
+ }
+ if selfuser.IsAdmin || selfuser.UUID == updateUserUUID {
+ // we are an admin, or the current user is the same as
+ // the user that we are updating.
+ return true, nil
+ }
+ // Better safe than sorry.
+ return false, nil
+}
+
func (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID && !options.BypassFederation {
- resp, err := conn.chooseBackend(id).UserList(ctx, options)
+ be := conn.chooseBackend(id)
+ resp, err := be.UserList(ctx, options)
if err != nil {
return resp, err
}
- err = conn.batchUpdateUsers(ctx, options, resp.Items)
+ includeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, "")
+ if err != nil {
+ return arvados.UserList{}, err
+ }
+ err = conn.batchUpdateUsers(ctx, options, resp.Items, includeAdminAndInvited)
if err != nil {
return arvados.UserList{}, err
}
@@ -645,13 +778,18 @@ func (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions)
if options.BypassFederation {
return conn.local.UserUpdate(ctx, options)
}
- resp, err := conn.chooseBackend(options.UUID).UserUpdate(ctx, options)
+ be := conn.chooseBackend(options.UUID)
+ resp, err := be.UserUpdate(ctx, options)
if err != nil {
return resp, err
}
if !strings.HasPrefix(options.UUID, conn.cluster.ClusterID) {
+ includeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, options.UUID)
+ if err != nil {
+ return arvados.User{}, err
+ }
// Copy the updated user record to the local cluster
- err = conn.batchUpdateUsers(ctx, arvados.ListOptions{}, []arvados.User{resp})
+ err = conn.batchUpdateUsers(ctx, arvados.ListOptions{}, []arvados.User{resp}, includeAdminAndInvited)
if err != nil {
return arvados.User{}, err
}
@@ -698,7 +836,8 @@ func (conn *Conn) UserUnsetup(ctx context.Context, options arvados.GetOptions) (
}
func (conn *Conn) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
- resp, err := conn.chooseBackend(options.UUID).UserGet(ctx, options)
+ be := conn.chooseBackend(options.UUID)
+ resp, err := be.UserGet(ctx, options)
if err != nil {
return resp, err
}
@@ -706,7 +845,11 @@ func (conn *Conn) UserGet(ctx context.Context, options arvados.GetOptions) (arva
return arvados.User{}, httpErrorf(http.StatusBadGateway, "Had requested %v but response was for %v", options.UUID, resp.UUID)
}
if options.UUID[:5] != conn.cluster.ClusterID {
- err = conn.batchUpdateUsers(ctx, arvados.ListOptions{Select: options.Select}, []arvados.User{resp})
+ includeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, options.UUID)
+ if err != nil {
+ return arvados.User{}, err
+ }
+ err = conn.batchUpdateUsers(ctx, arvados.ListOptions{Select: options.Select}, []arvados.User{resp}, includeAdminAndInvited)
if err != nil {
return arvados.User{}, err
}
diff --git a/lib/controller/federation/federation_test.go b/lib/controller/federation/federation_test.go
index 5460e938a6..6e85dfdba2 100644
--- a/lib/controller/federation/federation_test.go
+++ b/lib/controller/federation/federation_test.go
@@ -70,7 +70,7 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
ctx = ctrlctx.NewWithTransaction(ctx, s.tx)
s.ctx = ctx
- s.fed = New(s.cluster, nil)
+ s.fed = New(ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
}
func (s *FederationSuite) TearDownTest(c *check.C) {
diff --git a/lib/controller/federation/generate.go b/lib/controller/federation/generate.go
index 8af6131564..2dc2918f79 100644
--- a/lib/controller/federation/generate.go
+++ b/lib/controller/federation/generate.go
@@ -53,7 +53,7 @@ func main() {
defer out.Close()
out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
- for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User", "Link", "APIClientAuthorization"} {
+ for _, t := range []string{"AuthorizedKey", "Container", "ContainerRequest", "Group", "Specimen", "User", "Link", "Log", "APIClientAuthorization"} {
_, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
if err != nil {
panic(err)
diff --git a/lib/controller/federation/generated.go b/lib/controller/federation/generated.go
index 66f36161d5..8c8666fea1 100755
--- a/lib/controller/federation/generated.go
+++ b/lib/controller/federation/generated.go
@@ -17,6 +17,47 @@ import (
// -- this file is auto-generated -- do not edit -- edit list.go and run "go generate" instead --
//
+func (conn *Conn) generated_AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {
+ var mtx sync.Mutex
+ var merged arvados.AuthorizedKeyList
+ var needSort atomic.Value
+ needSort.Store(false)
+ err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+ options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+ cl, err := backend.AuthorizedKeyList(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ if len(merged.Items) == 0 {
+ merged = cl
+ } else if len(cl.Items) > 0 {
+ merged.Items = append(merged.Items, cl.Items...)
+ needSort.Store(true)
+ }
+ uuids := make([]string, 0, len(cl.Items))
+ for _, item := range cl.Items {
+ uuids = append(uuids, item.UUID)
+ }
+ return uuids, nil
+ })
+ if needSort.Load().(bool) {
+ // Apply the default/implied order, "modified_at desc"
+ sort.Slice(merged.Items, func(i, j int) bool {
+ mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+ return mj.Before(mi)
+ })
+ }
+ if merged.Items == nil {
+ // Return empty results as [], not null
+ // (https://github.com/golang/go/issues/27589 might be
+ // a better solution in the future)
+ merged.Items = []arvados.AuthorizedKey{}
+ }
+ return merged, err
+}
+
func (conn *Conn) generated_ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
var mtx sync.Mutex
var merged arvados.ContainerList
@@ -263,6 +304,47 @@ func (conn *Conn) generated_LinkList(ctx context.Context, options arvados.ListOp
return merged, err
}
+func (conn *Conn) generated_LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {
+ var mtx sync.Mutex
+ var merged arvados.LogList
+ var needSort atomic.Value
+ needSort.Store(false)
+ err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+ options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+ cl, err := backend.LogList(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ mtx.Lock()
+ defer mtx.Unlock()
+ if len(merged.Items) == 0 {
+ merged = cl
+ } else if len(cl.Items) > 0 {
+ merged.Items = append(merged.Items, cl.Items...)
+ needSort.Store(true)
+ }
+ uuids := make([]string, 0, len(cl.Items))
+ for _, item := range cl.Items {
+ uuids = append(uuids, item.UUID)
+ }
+ return uuids, nil
+ })
+ if needSort.Load().(bool) {
+ // Apply the default/implied order, "modified_at desc"
+ sort.Slice(merged.Items, func(i, j int) bool {
+ mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+ return mj.Before(mi)
+ })
+ }
+ if merged.Items == nil {
+ // Return empty results as [], not null
+ // (https://github.com/golang/go/issues/27589 might be
+ // a better solution in the future)
+ merged.Items = []arvados.Log{}
+ }
+ return merged, err
+}
+
func (conn *Conn) generated_APIClientAuthorizationList(ctx context.Context, options arvados.ListOptions) (arvados.APIClientAuthorizationList, error) {
var mtx sync.Mutex
var merged arvados.APIClientAuthorizationList
diff --git a/lib/controller/federation/group_test.go b/lib/controller/federation/group_test.go
index 1ee6f58764..a62120c587 100644
--- a/lib/controller/federation/group_test.go
+++ b/lib/controller/federation/group_test.go
@@ -5,6 +5,7 @@
package federation
import (
+ "context"
"errors"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -21,7 +22,7 @@ type GroupSuite struct {
func makeConn() (*Conn, *arvadostest.APIStub, *arvadostest.APIStub) {
localAPIstub := &arvadostest.APIStub{Error: errors.New("No result")}
remoteAPIstub := &arvadostest.APIStub{Error: errors.New("No result")}
- return &Conn{&arvados.Cluster{ClusterID: "local"}, localAPIstub, map[string]backend{"zzzzz": remoteAPIstub}}, localAPIstub, remoteAPIstub
+ return &Conn{context.Background(), &arvados.Cluster{ClusterID: "local"}, localAPIstub, map[string]backend{"zzzzz": remoteAPIstub}}, localAPIstub, remoteAPIstub
}
func (s *UserSuite) TestGroupContents(c *check.C) {
diff --git a/lib/controller/federation/list.go b/lib/controller/federation/list.go
index 039caac574..329066d1dc 100644
--- a/lib/controller/federation/list.go
+++ b/lib/controller/federation/list.go
@@ -65,13 +65,13 @@ func (conn *Conn) generated_CollectionList(ctx context.Context, options arvados.
// Call fn on one or more local/remote backends if opts indicates a
// federation-wide list query, i.e.:
//
-// * There is at least one filter of the form
-// ["uuid","in",[a,b,c,...]] or ["uuid","=",a]
+// - There is at least one filter of the form
+// ["uuid","in",[a,b,c,...]] or ["uuid","=",a]
//
-// * One or more of the supplied UUIDs (a,b,c,...) has a non-local
-// prefix.
+// - One or more of the supplied UUIDs (a,b,c,...) has a non-local
+// prefix.
//
-// * There are no other filters
+// - There are no other filters
//
// (If opts doesn't indicate a federation-wide list query, fn is just
// called once with the local backend.)
@@ -79,29 +79,29 @@ func (conn *Conn) generated_CollectionList(ctx context.Context, options arvados.
// fn is called more than once only if the query meets the following
// restrictions:
//
-// * Count=="none"
+// - Count=="none"
//
-// * Limit<0
+// - Limit<0
//
-// * len(Order)==0
+// - len(Order)==0
//
-// * Each filter is either "uuid = ..." or "uuid in [...]".
+// - Each filter is either "uuid = ..." or "uuid in [...]".
//
-// * The maximum possible response size (total number of objects that
-// could potentially be matched by all of the specified filters)
-// exceeds the local cluster's response page size limit.
+// - The maximum possible response size (total number of objects
+// that could potentially be matched by all of the specified
+// filters) exceeds the local cluster's response page size limit.
//
// If the query involves multiple backends but doesn't meet these
// restrictions, an error is returned without calling fn.
//
// Thus, the caller can assume that either:
//
-// * splitListRequest() returns an error, or
+// - splitListRequest() returns an error, or
//
-// * fn is called exactly once, or
+// - fn is called exactly once, or
//
-// * fn is called more than once, with options that satisfy the above
-// restrictions.
+// - fn is called more than once, with options that satisfy the above
+// restrictions.
//
// Each call to fn indicates a single (local or remote) backend and a
// corresponding options argument suitable for sending to that
diff --git a/lib/controller/federation/login_test.go b/lib/controller/federation/login_test.go
index c05ebfce69..ab39619c79 100644
--- a/lib/controller/federation/login_test.go
+++ b/lib/controller/federation/login_test.go
@@ -10,7 +10,6 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
check "gopkg.in/check.v1"
)
@@ -39,38 +38,3 @@ func (s *LoginSuite) TestDeferToLoginCluster(c *check.C) {
c.Check(remotePresent, check.Equals, remote != "")
}
}
-
-func (s *LoginSuite) TestLogout(c *check.C) {
- s.cluster.Services.Workbench1.ExternalURL = arvados.URL{Scheme: "https", Host: "workbench1.example.com"}
- s.cluster.Services.Workbench2.ExternalURL = arvados.URL{Scheme: "https", Host: "workbench2.example.com"}
- s.addHTTPRemote(c, "zhome", &arvadostest.APIStub{})
- s.cluster.Login.LoginCluster = "zhome"
- // s.fed is already set by SetUpTest, but we need to
- // reinitialize with the above config changes.
- s.fed = New(s.cluster, nil)
-
- returnTo := "https://app.example.com/foo?bar"
- for _, trial := range []struct {
- token string
- returnTo string
- target string
- }{
- {token: "", returnTo: "", target: s.cluster.Services.Workbench2.ExternalURL.String()},
- {token: "", returnTo: returnTo, target: returnTo},
- {token: "zzzzzzzzzzzzzzzzzzzzz", returnTo: returnTo, target: returnTo},
- {token: "v2/zzzzz-aaaaa-aaaaaaaaaaaaaaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", returnTo: returnTo, target: returnTo},
- {token: "v2/zhome-aaaaa-aaaaaaaaaaaaaaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", returnTo: returnTo, target: "http://" + s.cluster.RemoteClusters["zhome"].Host + "/logout?" + url.Values{"return_to": {returnTo}}.Encode()},
- } {
- c.Logf("trial %#v", trial)
- ctx := s.ctx
- if trial.token != "" {
- ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{trial.token}})
- }
- resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: trial.returnTo})
- c.Assert(err, check.IsNil)
- c.Logf(" RedirectLocation %q", resp.RedirectLocation)
- target, err := url.Parse(resp.RedirectLocation)
- c.Check(err, check.IsNil)
- c.Check(target.String(), check.Equals, trial.target)
- }
-}
diff --git a/lib/controller/federation/logout_test.go b/lib/controller/federation/logout_test.go
new file mode 100644
index 0000000000..af6f6d9ed2
--- /dev/null
+++ b/lib/controller/federation/logout_test.go
@@ -0,0 +1,246 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package federation
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LogoutSuite{})
+var emptyURL = &url.URL{}
+
+type LogoutStub struct {
+ arvadostest.APIStub
+ redirectLocation *url.URL
+}
+
+func (as *LogoutStub) CheckCalls(c *check.C, returnURL *url.URL) bool {
+ actual := as.APIStub.Calls(as.APIStub.Logout)
+ allOK := c.Check(actual, check.Not(check.HasLen), 0,
+ check.Commentf("Logout stub never called"))
+ expected := returnURL.String()
+ for _, call := range actual {
+ opts, ok := call.Options.(arvados.LogoutOptions)
+ allOK = c.Check(ok, check.Equals, true,
+ check.Commentf("call options were not LogoutOptions")) &&
+ c.Check(opts.ReturnTo, check.Equals, expected) &&
+ allOK
+ }
+ return allOK
+}
+
+func (as *LogoutStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ as.APIStub.Logout(ctx, options)
+ loc := as.redirectLocation.String()
+ if loc == "" {
+ loc = options.ReturnTo
+ }
+ return arvados.LogoutResponse{
+ RedirectLocation: loc,
+ }, as.Error
+}
+
+type LogoutSuite struct {
+ FederationSuite
+}
+
+func (s *LogoutSuite) badReturnURL(path string) *url.URL {
+ return &url.URL{
+ Scheme: "https",
+ Host: "example.net",
+ Path: path,
+ }
+}
+
+func (s *LogoutSuite) goodReturnURL(path string) *url.URL {
+ u, _ := url.Parse(s.cluster.Services.Workbench2.ExternalURL.String())
+ u.Path = path
+ return u
+}
+
+func (s *LogoutSuite) setupFederation(loginCluster string) {
+ if loginCluster == "" {
+ s.cluster.Login.Test.Enable = true
+ } else {
+ s.cluster.Login.LoginCluster = loginCluster
+ }
+ dbconn := ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}
+ s.fed = New(s.ctx, s.cluster, nil, dbconn.GetDB)
+}
+
+func (s *LogoutSuite) setupStub(c *check.C, id string, stubURL *url.URL, stubErr error) *LogoutStub {
+ loc, err := url.Parse(stubURL.String())
+ c.Check(err, check.IsNil)
+ stub := LogoutStub{redirectLocation: loc}
+ stub.Error = stubErr
+ if id == s.cluster.ClusterID {
+ s.fed.local = &stub
+ } else {
+ s.addDirectRemote(c, id, &stub)
+ }
+ return &stub
+}
+
+func (s *LogoutSuite) v2Token(clusterID string) string {
+ return fmt.Sprintf("v2/%s-gj3su-12345abcde67890/abcdefghijklmnopqrstuvwxy", clusterID)
+}
+
+func (s *LogoutSuite) TestLocalLogoutOK(c *check.C) {
+ s.setupFederation("")
+ resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, s.cluster.Services.Workbench2.ExternalURL.String())
+}
+
+func (s *LogoutSuite) TestLocalLogoutRedirect(c *check.C) {
+ s.setupFederation("")
+ expURL := s.cluster.Services.Workbench1.ExternalURL
+ opts := arvados.LogoutOptions{ReturnTo: expURL.String()}
+ resp, err := s.fed.Logout(s.ctx, opts)
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, expURL.String())
+}
+
+func (s *LogoutSuite) TestLocalLogoutBadRequestError(c *check.C) {
+ s.setupFederation("")
+ returnTo := s.badReturnURL("TestLocalLogoutBadRequestError")
+ opts := arvados.LogoutOptions{ReturnTo: returnTo.String()}
+ _, err := s.fed.Logout(s.ctx, opts)
+ c.Check(err, check.NotNil)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutRedirect(c *check.C) {
+ s.setupFederation("zhome")
+ redirect := url.URL{Scheme: "https", Host: "example.com"}
+ loginStub := s.setupStub(c, "zhome", &redirect, nil)
+ returnTo := s.goodReturnURL("TestRemoteLogoutRedirect")
+ resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+ loginStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutError(c *check.C) {
+ s.setupFederation("zhome")
+ expErr := errors.New("TestRemoteLogoutError expErr")
+ loginStub := s.setupStub(c, "zhome", emptyURL, expErr)
+ returnTo := s.goodReturnURL("TestRemoteLogoutError")
+ _, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.Equals, expErr)
+ loginStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutLocalRedirect(c *check.C) {
+ s.setupFederation("zhome")
+ loginStub := s.setupStub(c, "zhome", emptyURL, nil)
+ redirect := url.URL{Scheme: "https", Host: "example.com"}
+ localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+ resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+ // emptyURL to match the empty LogoutOptions
+ loginStub.CheckCalls(c, emptyURL)
+ localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutLocalError(c *check.C) {
+ s.setupFederation("zhome")
+ expErr := errors.New("TestRemoteLogoutLocalError expErr")
+ loginStub := s.setupStub(c, "zhome", emptyURL, nil)
+ localStub := s.setupStub(c, "aaaaa", emptyURL, expErr)
+ _, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+ c.Check(err, check.Equals, expErr)
+ loginStub.CheckCalls(c, emptyURL)
+ localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2TokenRedirect(c *check.C) {
+ s.setupFederation("")
+ redirect := url.URL{Scheme: "https", Host: "example.com"}
+ returnTo := s.goodReturnURL("TestV2TokenRedirect")
+ localErr := errors.New("TestV2TokenRedirect error")
+ tokenStub := s.setupStub(c, "zzzzz", &redirect, nil)
+ s.setupStub(c, "aaaaa", emptyURL, localErr)
+ tokens := []string{s.v2Token("zzzzz")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+ tokenStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2TokenError(c *check.C) {
+ s.setupFederation("")
+ returnTo := s.goodReturnURL("TestV2TokenError")
+ tokenErr := errors.New("TestV2TokenError error")
+ tokenStub := s.setupStub(c, "zzzzz", emptyURL, tokenErr)
+ s.setupStub(c, "aaaaa", emptyURL, nil)
+ tokens := []string{s.v2Token("zzzzz")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ _, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.Equals, tokenErr)
+ tokenStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2TokenLocalRedirect(c *check.C) {
+ s.setupFederation("")
+ redirect := url.URL{Scheme: "https", Host: "example.com"}
+ tokenStub := s.setupStub(c, "zzzzz", emptyURL, nil)
+ localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+ tokens := []string{s.v2Token("zzzzz")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+ tokenStub.CheckCalls(c, emptyURL)
+ localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2TokenLocalError(c *check.C) {
+ s.setupFederation("")
+ tokenErr := errors.New("TestV2TokenLocalError error")
+ tokenStub := s.setupStub(c, "zzzzz", emptyURL, nil)
+ localStub := s.setupStub(c, "aaaaa", emptyURL, tokenErr)
+ tokens := []string{s.v2Token("zzzzz")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ _, err := s.fed.Logout(ctx, arvados.LogoutOptions{})
+ c.Check(err, check.Equals, tokenErr)
+ tokenStub.CheckCalls(c, emptyURL)
+ localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2LocalTokenRedirect(c *check.C) {
+ s.setupFederation("")
+ redirect := url.URL{Scheme: "https", Host: "example.com"}
+ returnTo := s.goodReturnURL("TestV2LocalTokenRedirect")
+ localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+ tokens := []string{s.v2Token("aaaaa")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+ localStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2LocalTokenError(c *check.C) {
+ s.setupFederation("")
+ returnTo := s.goodReturnURL("TestV2LocalTokenError")
+ tokenErr := errors.New("TestV2LocalTokenError error")
+ localStub := s.setupStub(c, "aaaaa", emptyURL, tokenErr)
+ tokens := []string{s.v2Token("aaaaa")}
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+ _, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+ c.Check(err, check.Equals, tokenErr)
+ localStub.CheckCalls(c, returnTo)
+}
diff --git a/lib/controller/federation/user_test.go b/lib/controller/federation/user_test.go
index 064f8ce5d0..33bc95d0ea 100644
--- a/lib/controller/federation/user_test.go
+++ b/lib/controller/federation/user_test.go
@@ -14,6 +14,7 @@ import (
"strings"
"git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
@@ -30,7 +31,7 @@ type UserSuite struct {
func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster, nil)
+ s.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
for _, updateFail := range []bool{false, true} {
@@ -77,7 +78,7 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
"identity_url": false,
// virtual attrs
"full_name": false,
- "is_invited": false,
+ "is_invited": true,
}
if opts.Select != nil {
// Only the selected
@@ -120,7 +121,7 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster, nil)
+ s.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
opts := arvados.GetOptions{UUID: "zzzzz-tpzed-xurymjxw79nv3jz", Select: []string{"uuid", "email"}}
@@ -145,7 +146,7 @@ func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
"identity_url": false,
// virtual attrs
"full_name": false,
- "is_invited": false,
+ "is_invited": true,
}
if opts.Select != nil {
// Only the selected
@@ -174,7 +175,7 @@ func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
s.cluster.ClusterID = "local"
s.cluster.Login.LoginCluster = "zzzzz"
- s.fed = New(s.cluster, nil)
+ s.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
true, rpc.PassthroughTokenProvider))
diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
index a3b198ffc9..599686e3e6 100644
--- a/lib/controller/federation_test.go
+++ b/lib/controller/federation_test.go
@@ -32,6 +32,9 @@ import (
var _ = check.Suite(&FederationSuite{})
type FederationSuite struct {
+ ctx context.Context
+ cancel context.CancelFunc
+
log logrus.FieldLogger
// testServer and testHandler are the controller being tested,
// "zhome".
@@ -48,6 +51,7 @@ type FederationSuite struct {
}
func (s *FederationSuite) SetUpTest(c *check.C) {
+ s.ctx, s.cancel = context.WithCancel(context.Background())
s.log = ctxlog.TestLogger(c)
s.remoteServer = newServerFromIntegrationTestEnv(c)
@@ -70,7 +74,7 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour * 24 * 14)
arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
- s.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(context.Background(), s.log)}
+ s.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(s.ctx, s.log)}
s.testServer = newServerFromIntegrationTestEnv(c)
s.testServer.Server.BaseContext = func(net.Listener) context.Context {
return ctxlog.Context(context.Background(), s.log)
@@ -115,6 +119,7 @@ func (s *FederationSuite) TearDownTest(c *check.C) {
if s.testServer != nil {
s.testServer.Close()
}
+ s.cancel()
}
func (s *FederationSuite) testRequest(req *http.Request) *httptest.ResponseRecorder {
@@ -702,7 +707,7 @@ func (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *c
s.testHandler.Cluster.API.MaxTokenLifetime = arvados.Duration(time.Hour)
resp := s.testRequest(req).Result()
- c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ c.Assert(resp.StatusCode, check.Equals, http.StatusOK)
cr := s.getCRfromMockRequest(c)
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
index 665fd5c636..7c4bb0912f 100644
--- a/lib/controller/handler.go
+++ b/lib/controller/handler.go
@@ -6,13 +6,17 @@ package controller
import (
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io/ioutil"
+ "mime"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"sync"
+ "time"
"git.arvados.org/arvados.git/lib/controller/api"
"git.arvados.org/arvados.git/lib/controller/federation"
@@ -24,7 +28,6 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
"git.arvados.org/arvados.git/sdk/go/httpserver"
- "github.com/jmoiron/sqlx"
// sqlx needs lib/pq to talk to PostgreSQL
_ "github.com/lib/pq"
@@ -40,8 +43,10 @@ type Handler struct {
proxy *proxy
secureClient *http.Client
insecureClient *http.Client
- pgdb *sqlx.DB
- pgdbMtx sync.Mutex
+ dbConnector ctrlctx.DBConnector
+ limitLogCreate chan struct{}
+
+ cache map[string]*cacheEnt
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -65,7 +70,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
func (h *Handler) CheckHealth() error {
h.setupOnce.Do(h.setup)
- _, err := h.db(context.TODO())
+ _, err := h.dbConnector.GetDB(context.TODO())
if err != nil {
return err
}
@@ -97,14 +102,22 @@ func (h *Handler) setup() {
mux := http.NewServeMux()
healthFuncs := make(map[string]health.Func)
- oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
- h.federation = federation.New(h.Cluster, &healthFuncs)
+ h.dbConnector = ctrlctx.DBConnector{PostgreSQL: h.Cluster.PostgreSQL}
+ go func() {
+ <-h.BackgroundContext.Done()
+ h.dbConnector.Close()
+ }()
+ oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.dbConnector.GetDB)
+ h.federation = federation.New(h.BackgroundContext, h.Cluster, &healthFuncs, h.dbConnector.GetDB)
rtr := router.New(h.federation, router.Config{
MaxRequestSize: h.Cluster.API.MaxRequestSize,
- WrapCalls: api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
+ WrapCalls: api.ComposeWrappers(
+ ctrlctx.WrapCallsInTransactions(h.dbConnector.GetDB),
+ oidcAuthorizer.WrapCalls,
+ ctrlctx.WrapCallsWithAuth(h.Cluster)),
})
- healthRoutes := health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }}
+ healthRoutes := health.Routes{"ping": func() error { _, err := h.dbConnector.GetDB(context.TODO()); return err }}
for name, f := range healthFuncs {
healthRoutes[name] = f
}
@@ -127,6 +140,8 @@ func (h *Handler) setup() {
mux.Handle("/arvados/v1/groups/", rtr)
mux.Handle("/arvados/v1/links", rtr)
mux.Handle("/arvados/v1/links/", rtr)
+ mux.Handle("/arvados/v1/authorized_keys", rtr)
+ mux.Handle("/arvados/v1/authorized_keys/", rtr)
mux.Handle("/login", rtr)
mux.Handle("/logout", rtr)
mux.Handle("/arvados/v1/api_client_authorizations", rtr)
@@ -134,6 +149,8 @@ func (h *Handler) setup() {
hs := http.NotFoundHandler()
hs = prepend(hs, h.proxyRailsAPI)
+ hs = prepend(hs, h.routeContainerEndpoints(rtr))
+ hs = prepend(hs, h.limitLogCreateRequests)
hs = h.setupProxyRemoteCluster(hs)
hs = prepend(hs, oidcAuthorizer.Middleware)
mux.Handle("/", hs)
@@ -147,36 +164,21 @@ func (h *Handler) setup() {
ic.CheckRedirect = neverRedirect
h.insecureClient = &ic
+ logCreateLimit := int(float64(h.Cluster.API.MaxConcurrentRequests) * h.Cluster.API.LogCreateRequestFraction)
+ if logCreateLimit == 0 && h.Cluster.API.LogCreateRequestFraction > 0 {
+ logCreateLimit = 1
+ }
+ h.limitLogCreate = make(chan struct{}, logCreateLimit)
+
h.proxy = &proxy{
Name: "arvados-controller",
}
-
- go h.trashSweepWorker()
-}
-
-var errDBConnection = errors.New("database connection error")
-
-func (h *Handler) db(ctx context.Context) (*sqlx.DB, error) {
- h.pgdbMtx.Lock()
- defer h.pgdbMtx.Unlock()
- if h.pgdb != nil {
- return h.pgdb, nil
+ h.cache = map[string]*cacheEnt{
+ "/discovery/v1/apis/arvados/v1/rest": &cacheEnt{validate: validateDiscoveryDoc},
}
- db, err := sqlx.Open("postgres", h.Cluster.PostgreSQL.Connection.String())
- if err != nil {
- ctxlog.FromContext(ctx).WithError(err).Error("postgresql connect failed")
- return nil, errDBConnection
- }
- if p := h.Cluster.PostgreSQL.ConnectionPool; p > 0 {
- db.SetMaxOpenConns(p)
- }
- if err := db.Ping(); err != nil {
- ctxlog.FromContext(ctx).WithError(err).Error("postgresql connect succeeded but ping failed")
- return nil, errDBConnection
- }
- h.pgdb = db
- return db, nil
+ go h.trashSweepWorker()
+ go h.containerLogSweepWorker()
}
type middlewareFunc func(http.ResponseWriter, *http.Request, http.Handler)
@@ -203,10 +205,169 @@ func (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error)
if insecure {
client = h.insecureClient
}
+ // Clearing the Host field here causes the Go http client to
+ // use the host part of urlOut as the Host header in the
+ // outgoing request, instead of the Host value from the
+ // original request we received.
+ req.Host = ""
return h.proxy.Do(req, urlOut, client)
}
+// Route /arvados/v1/containers/{uuid}/log*, .../ssh, and
+// .../gateway_tunnel to rtr, pass everything else to next.
+//
+// (http.ServeMux doesn't let us route these without also routing
+// everything under /containers/, which we don't want yet.)
+func (h *Handler) routeContainerEndpoints(rtr http.Handler) middlewareFunc {
+ return func(w http.ResponseWriter, req *http.Request, next http.Handler) {
+ trim := strings.TrimPrefix(req.URL.Path, "/arvados/v1/containers/")
+ if trim != req.URL.Path && (strings.Index(trim, "/log") == 27 ||
+ strings.Index(trim, "/ssh") == 27 ||
+ strings.Index(trim, "/gateway_tunnel") == 27) {
+ rtr.ServeHTTP(w, req)
+ } else {
+ next.ServeHTTP(w, req)
+ }
+ }
+}
+
+func (h *Handler) limitLogCreateRequests(w http.ResponseWriter, req *http.Request, next http.Handler) {
+ if cap(h.limitLogCreate) > 0 && req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, "/arvados/v1/logs") {
+ select {
+ case h.limitLogCreate <- struct{}{}:
+ defer func() { <-h.limitLogCreate }()
+ next.ServeHTTP(w, req)
+ default:
+ http.Error(w, "Excess log messages", http.StatusServiceUnavailable)
+ }
+ return
+ }
+ next.ServeHTTP(w, req)
+}
+
+// cacheEnt implements a basic stale-while-revalidate cache, suitable
+// for the Arvados discovery document.
+type cacheEnt struct {
+ validate func(body []byte) error
+ mtx sync.Mutex
+ header http.Header
+ body []byte
+ expireAfter time.Time
+ refreshAfter time.Time
+ refreshLock sync.Mutex
+}
+
+const (
+ cacheTTL = 5 * time.Minute
+ cacheExpire = 24 * time.Hour
+)
+
+func (ent *cacheEnt) refresh(path string, do func(*http.Request) (*http.Response, error)) (http.Header, []byte, error) {
+ ent.refreshLock.Lock()
+ defer ent.refreshLock.Unlock()
+ if header, body, needRefresh := ent.response(); !needRefresh {
+ // another goroutine refreshed successfully while we
+ // were waiting for refreshLock
+ return header, body, nil
+ } else if body != nil {
+ // Cache is present, but expired. We'll try to refresh
+ // below. Meanwhile, other refresh() calls will queue
+ // up for refreshLock -- and we don't want them to
+ // turn into N upstream requests, even if upstream is
+ // failing. (If we succeed we'll update the expiry
+ // time again below with the real cacheTTL -- this
+ // just takes care of the error case.)
+ ent.mtx.Lock()
+ ent.refreshAfter = time.Now().Add(time.Second)
+ ent.mtx.Unlock()
+ }
+
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+ // "http://localhost" is just a placeholder here -- we'll fill
+ // in req.URL.Path below, and then do(), which is
+ // localClusterRequest(), will replace the scheme and host
+ // parts with the real proxy destination.
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ req.URL.Path = path
+ resp, err := do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, nil, fmt.Errorf("HTTP status %d", resp.StatusCode)
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Read error: %w", err)
+ }
+ header := http.Header{}
+ for k, v := range resp.Header {
+ if !dropHeaders[k] && k != "X-Request-Id" {
+ header[k] = v
+ }
+ }
+ if ent.validate != nil {
+ if err := ent.validate(body); err != nil {
+ return nil, nil, err
+ }
+ } else if mediatype, _, err := mime.ParseMediaType(header.Get("Content-Type")); err == nil && mediatype == "application/json" {
+ if !json.Valid(body) {
+ return nil, nil, errors.New("invalid JSON encoding in response")
+ }
+ }
+ ent.mtx.Lock()
+ defer ent.mtx.Unlock()
+ ent.header = header
+ ent.body = body
+ ent.refreshAfter = time.Now().Add(cacheTTL)
+ ent.expireAfter = time.Now().Add(cacheExpire)
+ return ent.header, ent.body, nil
+}
+
+func (ent *cacheEnt) response() (http.Header, []byte, bool) {
+ ent.mtx.Lock()
+ defer ent.mtx.Unlock()
+ if ent.expireAfter.Before(time.Now()) {
+ ent.header, ent.body, ent.refreshAfter = nil, nil, time.Time{}
+ }
+ return ent.header, ent.body, ent.refreshAfter.Before(time.Now())
+}
+
+func (ent *cacheEnt) ServeHTTP(ctx context.Context, w http.ResponseWriter, path string, do func(*http.Request) (*http.Response, error)) {
+ header, body, needRefresh := ent.response()
+ if body == nil {
+ // need to fetch before we can return anything
+ var err error
+ header, body, err = ent.refresh(path, do)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadGateway)
+ return
+ }
+ } else if needRefresh {
+ // re-fetch in background
+ go func() {
+ _, _, err := ent.refresh(path, do)
+ if err != nil {
+ ctxlog.FromContext(ctx).WithError(err).WithField("path", path).Warn("error refreshing cache")
+ }
+ }()
+ }
+ for k, v := range header {
+ w.Header()[k] = v
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write(body)
+}
+
func (h *Handler) proxyRailsAPI(w http.ResponseWriter, req *http.Request, next http.Handler) {
+ if ent, ok := h.cache[req.URL.Path]; ok && req.Method == http.MethodGet {
+ ent.ServeHTTP(req.Context(), w, req.URL.Path, h.localClusterRequest)
+ return
+ }
resp, err := h.localClusterRequest(req)
n, err := h.proxy.ForwardResponse(w, resp, err)
if err != nil {
@@ -230,3 +391,15 @@ func findRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
}
return best, cluster.TLS.Insecure, nil
}
+
+func validateDiscoveryDoc(body []byte) error {
+ var dd arvados.DiscoveryDocument
+ err := json.Unmarshal(body, &dd)
+ if err != nil {
+ return fmt.Errorf("error decoding JSON response: %w", err)
+ }
+ if dd.BasePath == "" {
+ return errors.New("error in discovery document: no value for basePath")
+ }
+ return nil
+}
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
index 39c2b1c68e..eef0443b9a 100644
--- a/lib/controller/handler_test.go
+++ b/lib/controller/handler_test.go
@@ -16,9 +16,11 @@ import (
"net/url"
"os"
"strings"
+ "sync"
"testing"
"time"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
@@ -36,11 +38,12 @@ func Test(t *testing.T) {
var _ = check.Suite(&HandlerSuite{})
type HandlerSuite struct {
- cluster *arvados.Cluster
- handler *Handler
- logbuf *bytes.Buffer
- ctx context.Context
- cancel context.CancelFunc
+ cluster *arvados.Cluster
+ handler *Handler
+ railsSpy *arvadostest.Proxy
+ logbuf *bytes.Buffer
+ ctx context.Context
+ cancel context.CancelFunc
}
func (s *HandlerSuite) SetUpTest(c *check.C) {
@@ -54,6 +57,8 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
s.cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
s.cluster.TLS.Insecure = true
arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+ s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, s.railsSpy.URL.String())
arvadostest.SetServiceURL(&s.cluster.Services.Controller, "http://localhost:/")
s.handler = newHandler(s.ctx, s.cluster, "", prometheus.NewRegistry()).(*Handler)
}
@@ -92,6 +97,205 @@ func (s *HandlerSuite) TestConfigExport(c *check.C) {
}
}
+func (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {
+ countRailsReqs := func() int {
+ s.railsSpy.Wait()
+ n := 0
+ for _, req := range s.railsSpy.RequestDumps {
+ if bytes.Contains(req, []byte("/discovery/v1/apis/arvados/v1/rest")) {
+ n++
+ }
+ }
+ return n
+ }
+ getDD := func() int {
+ req := httptest.NewRequest(http.MethodGet, "/discovery/v1/apis/arvados/v1/rest", nil)
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ if resp.Code == http.StatusOK {
+ var dd arvados.DiscoveryDocument
+ err := json.Unmarshal(resp.Body.Bytes(), &dd)
+ c.Check(err, check.IsNil)
+ c.Check(dd.Schemas["Collection"].UUIDPrefix, check.Equals, "4zz18")
+ }
+ return resp.Code
+ }
+ getDDConcurrently := func(n int, expectCode int, checkArgs ...interface{}) *sync.WaitGroup {
+ var wg sync.WaitGroup
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ c.Check(getDD(), check.Equals, append([]interface{}{expectCode}, checkArgs...)...)
+ }()
+ }
+ return &wg
+ }
+ clearCache := func() {
+ for _, ent := range s.handler.cache {
+ ent.refreshLock.Lock()
+ ent.mtx.Lock()
+ ent.body, ent.header, ent.refreshAfter = nil, nil, time.Time{}
+ ent.mtx.Unlock()
+ ent.refreshLock.Unlock()
+ }
+ }
+ waitPendingUpdates := func() {
+ for _, ent := range s.handler.cache {
+ ent.refreshLock.Lock()
+ defer ent.refreshLock.Unlock()
+ ent.mtx.Lock()
+ defer ent.mtx.Unlock()
+ }
+ }
+ refreshNow := func() {
+ waitPendingUpdates()
+ for _, ent := range s.handler.cache {
+ ent.refreshAfter = time.Now()
+ }
+ }
+ expireNow := func() {
+ waitPendingUpdates()
+ for _, ent := range s.handler.cache {
+ ent.expireAfter = time.Now()
+ }
+ }
+
+ // Easy path: first req fetches, subsequent reqs use cache.
+ c.Check(countRailsReqs(), check.Equals, 0)
+ c.Check(getDD(), check.Equals, http.StatusOK)
+ c.Check(countRailsReqs(), check.Equals, 1)
+ c.Check(getDD(), check.Equals, http.StatusOK)
+ c.Check(countRailsReqs(), check.Equals, 1)
+ c.Check(getDD(), check.Equals, http.StatusOK)
+ c.Check(countRailsReqs(), check.Equals, 1)
+
+ // To guarantee we have concurrent requests, we set up
+ // railsSpy to hold up the Handler's outgoing requests until
+ // we send to (or close) holdReqs.
+ holdReqs := make(chan struct{})
+ s.railsSpy.Director = func(*http.Request) {
+ <-holdReqs
+ }
+
+ // Race at startup: first req fetches, other concurrent reqs
+ // wait for the initial fetch to complete, then all return.
+ clearCache()
+ reqsBefore := countRailsReqs()
+ wg := getDDConcurrently(5, http.StatusOK, check.Commentf("race at startup"))
+ close(holdReqs)
+ wg.Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Race after expiry: concurrent reqs return the cached data
+ // but initiate a new fetch in the background.
+ refreshNow()
+ holdReqs = make(chan struct{})
+ wg = getDDConcurrently(5, http.StatusOK, check.Commentf("race after expiry"))
+ reqsBefore = countRailsReqs()
+ close(holdReqs)
+ wg.Wait()
+ for deadline := time.Now().Add(time.Second); time.Now().Before(deadline) && countRailsReqs() < reqsBefore+1; {
+ time.Sleep(time.Second / 100)
+ }
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Configure railsSpy to return an error or bad content
+ // depending on flags.
+ var wantError, wantBadContent bool
+ s.railsSpy.Director = func(req *http.Request) {
+ if wantError {
+ req.Method = "MAKE-COFFEE"
+ } else if wantBadContent {
+ req.URL.Path = "/_health/ping"
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+ }
+ }
+
+ // Error at startup (empty cache) => caller gets error, and we
+ // make an upstream attempt for each incoming request because
+ // we have nothing better to return
+ clearCache()
+ wantError, wantBadContent = true, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ wg = getDDConcurrently(5, http.StatusBadGateway, check.Commentf("error at startup"))
+ close(holdReqs)
+ wg.Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+5)
+
+ // Response status is OK but body is not a discovery document
+ wantError, wantBadContent = false, true
+ reqsBefore = countRailsReqs()
+ c.Check(getDD(), check.Equals, http.StatusBadGateway)
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Error condition clears => caller gets OK, cache is warmed
+ // up
+ wantError, wantBadContent = false, false
+ reqsBefore = countRailsReqs()
+ getDDConcurrently(5, http.StatusOK, check.Commentf("success after errors at startup")).Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Error with warm cache => caller gets OK (with no attempt to
+ // re-fetch)
+ wantError, wantBadContent = true, false
+ reqsBefore = countRailsReqs()
+ getDDConcurrently(5, http.StatusOK, check.Commentf("error with warm cache")).Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore)
+
+ // Error with stale cache => caller gets OK with stale data
+ // while the re-fetch is attempted in the background
+ refreshNow()
+ wantError, wantBadContent = true, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ getDDConcurrently(5, http.StatusOK, check.Commentf("error with stale cache")).Wait()
+ close(holdReqs)
+ // Only one attempt to re-fetch (holdReqs ensured the first
+ // update took long enough for the last incoming request to
+ // arrive)
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ refreshNow()
+ wantError, wantBadContent = false, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ getDDConcurrently(5, http.StatusOK, check.Commentf("refresh cache after error condition clears")).Wait()
+ close(holdReqs)
+ waitPendingUpdates()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+
+ // Make sure expireAfter is getting set
+ waitPendingUpdates()
+ exp := s.handler.cache["/discovery/v1/apis/arvados/v1/rest"].expireAfter.Sub(time.Now())
+ c.Check(exp > cacheTTL, check.Equals, true)
+ c.Check(exp < cacheExpire, check.Equals, true)
+
+ // After the cache *expires* it behaves as if uninitialized:
+ // each incoming request does a new upstream request until one
+ // succeeds.
+ //
+ // First check failure after expiry:
+ expireNow()
+ wantError, wantBadContent = true, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ wg = getDDConcurrently(5, http.StatusBadGateway, check.Commentf("error after expiry"))
+ close(holdReqs)
+ wg.Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+5)
+
+ // Success after expiry:
+ wantError, wantBadContent = false, false
+ reqsBefore = countRailsReqs()
+ holdReqs = make(chan struct{})
+ wg = getDDConcurrently(5, http.StatusOK, check.Commentf("success after expiry"))
+ close(holdReqs)
+ wg.Wait()
+ c.Check(countRailsReqs(), check.Equals, reqsBefore+1)
+}
+
func (s *HandlerSuite) TestVocabularyExport(c *check.C) {
voc := `{
"strict_tags": false,
@@ -209,7 +413,7 @@ func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
// etc.
func (s *HandlerSuite) TestRequestCancel(c *check.C) {
ctx, cancel := context.WithCancel(context.Background())
- req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil).WithContext(ctx)
+ req := httptest.NewRequest("GET", "/static/login_failure", nil).WithContext(ctx)
resp := httptest.NewRecorder()
cancel()
s.handler.ServeHTTP(resp, req)
@@ -271,18 +475,20 @@ func (s *HandlerSuite) TestProxyNotFound(c *check.C) {
}
func (s *HandlerSuite) TestLogoutGoogle(c *check.C) {
+ s.cluster.Services.Workbench2.ExternalURL = arvados.URL{Scheme: "https", Host: "wb2.example", Path: "/"}
s.cluster.Login.Google.Enable = true
s.cluster.Login.Google.ClientID = "test"
- req := httptest.NewRequest("GET", "https://0.0.0.0:1/logout?return_to=https://example.com/foo", nil)
+ req := httptest.NewRequest("GET", "https://0.0.0.0:1/logout?return_to=https://wb2.example/", nil)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
if !c.Check(resp.Code, check.Equals, http.StatusFound) {
c.Log(resp.Body.String())
}
- c.Check(resp.Header().Get("Location"), check.Equals, "https://example.com/foo")
+ c.Check(resp.Header().Get("Location"), check.Equals, "https://wb2.example/")
}
func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
+ c.Assert(s.handler.CheckHealth(), check.IsNil)
req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
user, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveToken)
c.Assert(err, check.IsNil)
@@ -294,6 +500,7 @@ func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
}
func (s *HandlerSuite) TestValidateV2APIToken(c *check.C) {
+ c.Assert(s.handler.CheckHealth(), check.IsNil)
req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
user, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveTokenV2)
c.Assert(err, check.IsNil)
@@ -336,6 +543,7 @@ func (s *HandlerSuite) TestLogTokenUUID(c *check.C) {
}
func (s *HandlerSuite) TestCreateAPIToken(c *check.C) {
+ c.Assert(s.handler.CheckHealth(), check.IsNil)
req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
auth, err := s.handler.createAPItoken(req, arvadostest.ActiveUserUUID, nil)
c.Assert(err, check.IsNil)
@@ -432,7 +640,7 @@ func (s *HandlerSuite) TestGetObjects(c *check.C) {
testCases := map[string]map[string]bool{
"api_clients/" + arvadostest.TrustedWorkbenchAPIClientUUID: nil,
"api_client_authorizations/" + auth.UUID: {"href": true, "modified_by_client_uuid": true, "modified_by_user_uuid": true},
- "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: nil,
+ "authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: {"href": true},
"collections/" + arvadostest.CollectionWithUniqueWordsUUID: {"href": true},
"containers/" + arvadostest.RunningContainerUUID: nil,
"container_requests/" + arvadostest.QueuedContainerRequestUUID: nil,
@@ -476,7 +684,7 @@ func (s *HandlerSuite) TestTrashSweep(c *check.C) {
coll, err := s.handler.federation.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{"name": "test trash sweep"}, EnsureUniqueName: true})
c.Assert(err, check.IsNil)
defer s.handler.federation.CollectionDelete(ctx, arvados.DeleteOptions{UUID: coll.UUID})
- db, err := s.handler.db(s.ctx)
+ db, err := s.handler.dbConnector.GetDB(s.ctx)
c.Assert(err, check.IsNil)
_, err = db.ExecContext(s.ctx, `update collections set trash_at = $1, delete_at = $2 where uuid = $3`, time.Now().UTC().Add(time.Second/10), time.Now().UTC().Add(time.Hour), coll.UUID)
c.Assert(err, check.IsNil)
@@ -494,3 +702,123 @@ func (s *HandlerSuite) TestTrashSweep(c *check.C) {
time.Sleep(time.Second / 10)
}
}
+
+func (s *HandlerSuite) TestContainerLogSweep(c *check.C) {
+ s.cluster.SystemRootToken = arvadostest.SystemRootToken
+ s.cluster.Containers.Logging.SweepInterval = arvados.Duration(time.Second / 10)
+ s.handler.CheckHealth()
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+ logentry, err := s.handler.federation.LogCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "object_uuid": arvadostest.CompletedContainerUUID,
+ "event_type": "stderr",
+ "properties": map[string]interface{}{
+ "text": "test trash sweep\n",
+ },
+ }})
+ c.Assert(err, check.IsNil)
+ defer s.handler.federation.LogDelete(ctx, arvados.DeleteOptions{UUID: logentry.UUID})
+ deadline := time.Now().Add(5 * time.Second)
+ for {
+ if time.Now().After(deadline) {
+ c.Log("timed out")
+ c.FailNow()
+ }
+ logentries, err := s.handler.federation.LogList(ctx, arvados.ListOptions{Filters: []arvados.Filter{{"uuid", "=", logentry.UUID}}, Limit: -1})
+ c.Assert(err, check.IsNil)
+ if len(logentries.Items) == 0 {
+ break
+ }
+ time.Sleep(time.Second / 10)
+ }
+}
+
+func (s *HandlerSuite) TestLogActivity(c *check.C) {
+ s.cluster.SystemRootToken = arvadostest.SystemRootToken
+ s.cluster.Users.ActivityLoggingPeriod = arvados.Duration(24 * time.Hour)
+ s.handler.CheckHealth()
+
+ testServer := newServerFromIntegrationTestEnv(c)
+ testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.handler))
+ c.Assert(testServer.Start(), check.IsNil)
+ defer testServer.Close()
+
+ u, _ := url.Parse("http://" + testServer.Addr)
+ client := rpc.NewConn(s.cluster.ClusterID, u, true, rpc.PassthroughTokenProvider)
+
+ starttime := time.Now()
+ for i := 0; i < 4; i++ {
+ for _, token := range []string{
+ arvadostest.ActiveTokenV2,
+ arvadostest.ActiveToken,
+ arvadostest.SpectatorToken,
+ } {
+ ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{token}})
+ _, err := client.CollectionList(ctx, arvados.ListOptions{})
+ c.Assert(err, check.IsNil)
+ }
+ }
+ db, err := s.handler.dbConnector.GetDB(s.ctx)
+ c.Assert(err, check.IsNil)
+ for _, userUUID := range []string{arvadostest.ActiveUserUUID, arvadostest.SpectatorUserUUID} {
+ var rows int
+ err = db.QueryRowContext(s.ctx, `select count(uuid) from logs where object_uuid = $1 and event_at > $2`, userUUID, starttime.UTC()).Scan(&rows) // bind the loop's userUUID, not always ActiveUserUUID, so the spectator user is actually checked
+ c.Assert(err, check.IsNil)
+ c.Check(rows, check.Equals, 1, check.Commentf("expect 1 row for user uuid %s", userUUID))
+ }
+}
+
+func (s *HandlerSuite) TestLogLimiting(c *check.C) {
+ s.handler.Cluster.API.MaxConcurrentRequests = 2
+ s.handler.Cluster.API.LogCreateRequestFraction = 0.5
+
+ logreq := httptest.NewRequest("POST", "/arvados/v1/logs", strings.NewReader(`{
+ "log": {
+ "event_type": "test"
+ }
+ }`))
+ logreq.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+
+ // Log create succeeds
+ for i := 0; i < 2; i++ {
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, logreq)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ var lg arvados.Log
+ err := json.Unmarshal(resp.Body.Bytes(), &lg)
+ c.Check(err, check.IsNil)
+ c.Check(lg.UUID, check.Matches, "zzzzz-57u5n-.*")
+ }
+
+ // Pretend there's a log create in flight
+ s.handler.limitLogCreate <- struct{}{}
+
+ // Log create should be rejected now
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, logreq)
+ c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+
+ // Other requests still succeed
+ req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+ resp = httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ var u arvados.User
+ err := json.Unmarshal(resp.Body.Bytes(), &u)
+ c.Check(err, check.IsNil)
+ c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+
+ // log create still fails
+ resp = httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, logreq)
+ c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+
+ // Pretend in-flight log is done
+ <-s.handler.limitLogCreate
+
+ // log create succeeds again
+ resp = httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, logreq)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+
+}
diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
index b0ec4293a3..45f35a6d2e 100644
--- a/lib/controller/integration_test.go
+++ b/lib/controller/integration_test.go
@@ -28,6 +28,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
check "gopkg.in/check.v1"
)
@@ -72,6 +73,8 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
Insecure: true
SystemLogs:
Format: text
+ API:
+ MaxConcurrentRequests: 128
Containers:
CloudVMs:
Enable: true
@@ -165,6 +168,20 @@ func (s *IntegrationSuite) TestDefaultStorageClassesOnCollections(c *check.C) {
c.Assert(coll.StorageClassesDesired, check.DeepEquals, kc.DefaultStorageClasses)
}
+func (s *IntegrationSuite) createTestCollectionManifest(c *check.C, ac *arvados.Client, kc *keepclient.KeepClient, content string) string {
+ fs, err := (&arvados.Collection{}).FileSystem(ac, kc)
+ c.Assert(err, check.IsNil)
+ f, err := fs.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
+ c.Assert(err, check.IsNil)
+ _, err = io.WriteString(f, content)
+ c.Assert(err, check.IsNil)
+ err = f.Close()
+ c.Assert(err, check.IsNil)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Assert(err, check.IsNil)
+ return mtxt
+}
+
func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
conn1 := s.super.Conn("z1111")
rootctx1, _, _ := s.super.RootClients("z1111")
@@ -173,34 +190,70 @@ func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
// Create the collection to find its PDH (but don't save it
// anywhere yet)
- var coll1 arvados.Collection
- fs1, err := coll1.FileSystem(ac1, kc1)
- c.Assert(err, check.IsNil)
- f, err := fs1.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
- c.Assert(err, check.IsNil)
- _, err = io.WriteString(f, "IntegrationSuite.TestGetCollectionByPDH")
- c.Assert(err, check.IsNil)
- err = f.Close()
- c.Assert(err, check.IsNil)
- mtxt, err := fs1.MarshalManifest(".")
- c.Assert(err, check.IsNil)
+ mtxt := s.createTestCollectionManifest(c, ac1, kc1, c.TestName())
pdh := arvados.PortableDataHash(mtxt)
// Looking up the PDH before saving returns 404 if cycle
// detection is working.
- _, err = conn1.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
+ _, err := conn1.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
c.Assert(err, check.ErrorMatches, `.*404 Not Found.*`)
// Save the collection on cluster z1111.
- coll1, err = conn1.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ _, err = conn1.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
"manifest_text": mtxt,
}})
c.Assert(err, check.IsNil)
// Retrieve the collection from cluster z3333.
- coll, err := conn3.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
+ coll2, err := conn3.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})
c.Check(err, check.IsNil)
- c.Check(coll.PortableDataHash, check.Equals, pdh)
+ c.Check(coll2.PortableDataHash, check.Equals, pdh)
+}
+
+func (s *IntegrationSuite) TestFederation_Write1Read2(c *check.C) {
+ s.testFederationCollectionAccess(c, "z1111", "z2222")
+}
+
+func (s *IntegrationSuite) TestFederation_Write2Read1(c *check.C) {
+ s.testFederationCollectionAccess(c, "z2222", "z1111")
+}
+
+func (s *IntegrationSuite) TestFederation_Write2Read3(c *check.C) {
+ s.testFederationCollectionAccess(c, "z2222", "z3333")
+}
+
+func (s *IntegrationSuite) testFederationCollectionAccess(c *check.C, writeCluster, readCluster string) {
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+
+ connW := s.super.Conn(writeCluster)
+ userctxW, acW, kcW := s.super.ClientsWithToken(writeCluster, ac1.AuthToken)
+ kcW.DiskCacheSize = keepclient.DiskCacheDisabled
+ connR := s.super.Conn(readCluster)
+ userctxR, acR, kcR := s.super.ClientsWithToken(readCluster, ac1.AuthToken)
+ kcR.DiskCacheSize = keepclient.DiskCacheDisabled
+
+ filedata := fmt.Sprintf("%s: write to %s, read from %s", c.TestName(), writeCluster, readCluster)
+ mtxt := s.createTestCollectionManifest(c, acW, kcW, filedata)
+ collW, err := connW.CollectionCreate(userctxW, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "manifest_text": mtxt,
+ }})
+ c.Assert(err, check.IsNil)
+
+ collR, err := connR.CollectionGet(userctxR, arvados.GetOptions{UUID: collW.UUID})
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ fsR, err := collR.FileSystem(acR, kcR)
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ buf, err := fs.ReadFile(arvados.FS(fsR), "test.txt")
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ c.Check(string(buf), check.Equals, filedata)
}
// Tests bug #18004
@@ -499,6 +552,7 @@ func (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {
req.Header.Set("Authorization", "OAuth2 "+ac2.AuthToken)
resp, err = arvados.InsecureHTTPClient.Do(req)
c.Assert(err, check.IsNil)
+ defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&cr)
c.Check(err, check.IsNil)
c.Check(cr.UUID, check.Matches, "z2222-.*")
@@ -536,8 +590,10 @@ func (s *IntegrationSuite) TestCreateContainerRequestWithBadToken(c *check.C) {
c.Assert(err, check.IsNil)
req.Header.Set("Content-Type", "application/json")
resp, err := ac1.Do(req)
- c.Assert(err, check.IsNil)
- c.Assert(resp.StatusCode, check.Equals, tt.expectedCode)
+ if c.Check(err, check.IsNil) {
+ c.Assert(resp.StatusCode, check.Equals, tt.expectedCode)
+ resp.Body.Close()
+ }
}
}
@@ -605,9 +661,11 @@ func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
var jresp httpserver.ErrorResponse
err := json.NewDecoder(resp.Body).Decode(&jresp)
c.Check(err, check.IsNil)
- c.Assert(jresp.Errors, check.HasLen, 1)
- c.Check(jresp.Errors[0], check.Matches, `.*\(`+respHdr+`\).*`)
+ if c.Check(jresp.Errors, check.HasLen, 1) {
+ c.Check(jresp.Errors[0], check.Matches, `.*\(`+respHdr+`\).*`)
+ }
}
+ resp.Body.Close()
}
}
@@ -964,8 +1022,8 @@ func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
"hostname": "example",
},
})
+ c.Assert(err, check.IsNil)
c.Check(outVM.UUID[0:5], check.Equals, "z3333")
- c.Check(err, check.IsNil)
// Make sure z3333 user list is up to date
_, err = conn3.UserList(rootctx3, arvados.ListOptions{Limit: 1000})
@@ -1133,7 +1191,7 @@ func (s *IntegrationSuite) TestRunTrivialContainer(c *check.C) {
"environment": map[string]string{},
"mounts": map[string]arvados.Mount{"/out": {Kind: "tmp", Capacity: 10000}},
"output_path": "/out",
- "runtime_constraints": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1},
+ "runtime_constraints": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1, KeepCacheRAM: 1 << 26},
"priority": 1,
"state": arvados.ContainerRequestStateCommitted,
}, 0)
@@ -1160,7 +1218,7 @@ func (s *IntegrationSuite) TestContainerInputOnDifferentCluster(c *check.C) {
"/out": {Kind: "tmp", Capacity: 10000},
},
"output_path": "/out",
- "runtime_constraints": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1},
+ "runtime_constraints": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1, KeepCacheRAM: 1 << 26},
"priority": 1,
"state": arvados.ContainerRequestStateCommitted,
"container_count_max": 1,
@@ -1224,12 +1282,35 @@ func (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token stri
return cfs
}
+ checkwebdavlogs := func(cr arvados.ContainerRequest) {
+ req, err := http.NewRequest("OPTIONS", "https://"+ac.APIHost+"/arvados/v1/container_requests/"+cr.UUID+"/log/"+cr.ContainerUUID+"/", nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Origin", "http://example.example")
+ resp, err := ac.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ // Check for duplicate headers -- must use Header[], not Header.Get()
+ c.Check(resp.Header["Access-Control-Allow-Origin"], check.DeepEquals, []string{"*"})
+ }
+
var ctr arvados.Container
var lastState arvados.ContainerState
+ var status, lastStatus arvados.ContainerStatus
+ var allStatus string
+ checkstatus := func() {
+ err := ac.RequestAndDecode(&status, "GET", "/arvados/v1/container_requests/"+cr.UUID+"/container_status", nil, nil)
+ c.Assert(err, check.IsNil)
+ if status != lastStatus {
+ c.Logf("container status: %s, %s", status.State, status.SchedulingStatus)
+ allStatus += fmt.Sprintf("%s, %s\n", status.State, status.SchedulingStatus)
+ lastStatus = status
+ }
+ }
deadline := time.Now().Add(time.Minute)
- for cr.State != arvados.ContainerRequestStateFinal {
+ for cr.State != arvados.ContainerRequestStateFinal || (lastStatus.State != arvados.ContainerStateComplete && lastStatus.State != arvados.ContainerStateCancelled) {
err = ac.RequestAndDecode(&cr, "GET", "/arvados/v1/container_requests/"+cr.UUID, nil, nil)
c.Assert(err, check.IsNil)
+ checkstatus()
err = ac.RequestAndDecode(&ctr, "GET", "/arvados/v1/containers/"+cr.ContainerUUID, nil, nil)
if err != nil {
c.Logf("error getting container state: %s", err)
@@ -1239,18 +1320,33 @@ func (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token stri
} else {
if time.Now().After(deadline) {
c.Errorf("timed out, container state is %q", cr.State)
- showlogs(ctr.Log)
+ if ctr.Log == "" {
+ c.Logf("=== NO LOG COLLECTION saved for container")
+ } else {
+ showlogs(ctr.Log)
+ }
c.FailNow()
}
time.Sleep(time.Second / 2)
}
}
+ checkstatus()
+ c.Logf("cr.CumulativeCost == %f", cr.CumulativeCost)
+ c.Check(cr.CumulativeCost, check.Not(check.Equals), 0.0)
if expectExitCode >= 0 {
c.Check(ctr.State, check.Equals, arvados.ContainerStateComplete)
c.Check(ctr.ExitCode, check.Equals, expectExitCode)
err = ac.RequestAndDecode(&outcoll, "GET", "/arvados/v1/collections/"+cr.OutputUUID, nil, nil)
c.Assert(err, check.IsNil)
+ c.Check(allStatus, check.Matches, `Queued, waiting for dispatch\n`+
+ `(Queued, waiting.*\n)*`+
+ `(Locked, waiting for dispatch\n)?`+
+ `(Locked, waiting for new instance to be ready\n)?`+
+ `(Locked, preparing runtime environment\n)?`+
+ `(Running, \n)?`+
+ `Complete, \n`)
}
logcfs = showlogs(cr.LogUUID)
+ checkwebdavlogs(cr)
return outcoll, logcfs
}
diff --git a/lib/controller/localdb/authorized_key.go b/lib/controller/localdb/authorized_key.go
new file mode 100644
index 0000000000..4d858c8fa7
--- /dev/null
+++ b/lib/controller/localdb/authorized_key.go
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "golang.org/x/crypto/ssh"
+)
+
+// AuthorizedKeyCreate checks that the provided public key is valid,
+// then proxies to railsproxy.
+func (conn *Conn) AuthorizedKeyCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.AuthorizedKey, error) {
+ if err := validateKey(opts.Attrs); err != nil {
+ return arvados.AuthorizedKey{}, httpserver.ErrorWithStatus(err, http.StatusBadRequest)
+ }
+ return conn.railsProxy.AuthorizedKeyCreate(ctx, opts)
+}
+
+// AuthorizedKeyUpdate checks that the provided public key is valid,
+// then proxies to railsproxy.
+func (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.AuthorizedKey, error) {
+ if err := validateKey(opts.Attrs); err != nil {
+ return arvados.AuthorizedKey{}, httpserver.ErrorWithStatus(err, http.StatusBadRequest)
+ }
+ return conn.railsProxy.AuthorizedKeyUpdate(ctx, opts)
+}
+
+func validateKey(attrs map[string]interface{}) error {
+ in, _ := attrs["public_key"].(string)
+ if in == "" {
+ return nil
+ }
+ in = strings.TrimSpace(in)
+ if strings.IndexAny(in, "\r\n") >= 0 {
+ return errors.New("Public key does not appear to be valid: extra data after key")
+ }
+ pubkey, _, _, rest, err := ssh.ParseAuthorizedKey([]byte(in))
+ if err != nil {
+ return fmt.Errorf("Public key does not appear to be valid: %w", err)
+ }
+ if len(rest) > 0 {
+ return errors.New("Public key does not appear to be valid: extra data after key")
+ }
+ if i := strings.Index(in, " "); i < 0 {
+ return errors.New("Public key does not appear to be valid: no leading type field")
+ } else if in[:i] != pubkey.Type() {
+ return fmt.Errorf("Public key does not appear to be valid: leading type field %q does not match actual key type %q", in[:i], pubkey.Type())
+ }
+ return nil
+}
diff --git a/lib/controller/localdb/authorized_key_test.go b/lib/controller/localdb/authorized_key_test.go
new file mode 100644
index 0000000000..44fa3cf94e
--- /dev/null
+++ b/lib/controller/localdb/authorized_key_test.go
@@ -0,0 +1,114 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ _ "embed"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&authorizedKeySuite{})
+
+type authorizedKeySuite struct {
+ localdbSuite
+}
+
+//go:embed testdata/rsa.pub
+var testPubKey string
+
+func (s *authorizedKeySuite) TestAuthorizedKeyCreate(c *C) {
+ ak, err := s.localdb.AuthorizedKeyCreate(s.userctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "name": "testkey",
+ "key_type": "SSH",
+ }})
+ c.Assert(err, IsNil)
+ c.Check(ak.KeyType, Equals, "SSH")
+ defer s.localdb.AuthorizedKeyDelete(s.userctx, arvados.DeleteOptions{UUID: ak.UUID})
+ updated, err := s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{"name": "testkeyrenamed"}})
+ c.Check(err, IsNil)
+ c.Check(updated.UUID, Equals, ak.UUID)
+ c.Check(updated.Name, Equals, "testkeyrenamed")
+ c.Check(updated.ModifiedByUserUUID, Equals, arvadostest.ActiveUserUUID)
+
+ _, err = s.localdb.AuthorizedKeyCreate(s.userctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "name": "testkey",
+ "public_key": "ssh-dsa boguskey\n",
+ }})
+ c.Check(err, ErrorMatches, `Public key does not appear to be valid: ssh: no key found`)
+ _, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": strings.Replace(testPubKey, "A", "#", 1),
+ }})
+ c.Check(err, ErrorMatches, `Public key does not appear to be valid: ssh: no key found`)
+ _, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": testPubKey + testPubKey,
+ }})
+ c.Check(err, ErrorMatches, `Public key does not appear to be valid: extra data after key`)
+ _, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": testPubKey + "# extra data\n",
+ }})
+ c.Check(err, ErrorMatches, `Public key does not appear to be valid: extra data after key`)
+ _, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": strings.Replace(testPubKey, "ssh-rsa", "ssh-dsa", 1),
+ }})
+ c.Check(err, ErrorMatches, `Public key does not appear to be valid: leading type field "ssh-dsa" does not match actual key type "ssh-rsa"`)
+ var se httpserver.HTTPStatusError
+ if c.Check(errors.As(err, &se), Equals, true) {
+ c.Check(se.HTTPStatus(), Equals, http.StatusBadRequest)
+ }
+
+ dirents, err := os.ReadDir("./testdata")
+ c.Assert(err, IsNil)
+ c.Assert(dirents, Not(HasLen), 0)
+ for _, dirent := range dirents {
+ if !strings.HasSuffix(dirent.Name(), ".pub") {
+ continue
+ }
+ pubkeyfile := "./testdata/" + dirent.Name()
+ c.Logf("checking public key from %s", pubkeyfile)
+ pubkey, err := ioutil.ReadFile(pubkeyfile)
+ if !c.Check(err, IsNil) {
+ continue
+ }
+ updated, err := s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": string(pubkey),
+ }})
+ c.Check(err, IsNil)
+ c.Check(updated.PublicKey, Equals, string(pubkey))
+
+ _, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: ak.UUID,
+ Attrs: map[string]interface{}{
+ "public_key": strings.Replace(string(pubkey), " ", "-bogus ", 1),
+ }})
+ c.Check(err, ErrorMatches, `.*type field ".*" does not match actual key type ".*"`)
+ }
+
+ deleted, err := s.localdb.AuthorizedKeyDelete(s.userctx, arvados.DeleteOptions{UUID: ak.UUID})
+ c.Check(err, IsNil)
+ c.Check(deleted.UUID, Equals, ak.UUID)
+}
diff --git a/lib/controller/localdb/collection.go b/lib/controller/localdb/collection.go
index 868e466e9e..581595e5e3 100644
--- a/lib/controller/localdb/collection.go
+++ b/lib/controller/localdb/collection.go
@@ -22,6 +22,7 @@ import (
// CollectionGet defers to railsProxy for everything except blob
// signatures.
func (conn *Conn) CollectionGet(ctx context.Context, opts arvados.GetOptions) (arvados.Collection, error) {
+ conn.logActivity(ctx)
if len(opts.Select) > 0 {
// We need to know IsTrashed and TrashAt to implement
// signing properly, even if the caller doesn't want
@@ -39,6 +40,7 @@ func (conn *Conn) CollectionGet(ctx context.Context, opts arvados.GetOptions) (a
// CollectionList defers to railsProxy for everything except blob
// signatures.
func (conn *Conn) CollectionList(ctx context.Context, opts arvados.ListOptions) (arvados.CollectionList, error) {
+ conn.logActivity(ctx)
if len(opts.Select) > 0 {
// We need to know IsTrashed and TrashAt to implement
// signing properly, even if the caller doesn't want
@@ -58,6 +60,7 @@ func (conn *Conn) CollectionList(ctx context.Context, opts arvados.ListOptions)
// CollectionCreate defers to railsProxy for everything except blob
// signatures and vocabulary checking.
func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Collection, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.Collection{}, err
@@ -82,6 +85,7 @@ func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptio
// CollectionUpdate defers to railsProxy for everything except blob
// signatures and vocabulary checking.
func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Collection, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.Collection{}, err
diff --git a/lib/controller/localdb/collection_test.go b/lib/controller/localdb/collection_test.go
index dac8b769fe..7d1a909a6f 100644
--- a/lib/controller/localdb/collection_test.go
+++ b/lib/controller/localdb/collection_test.go
@@ -5,7 +5,6 @@
package localdb
import (
- "context"
"io/fs"
"path/filepath"
"regexp"
@@ -14,13 +13,10 @@ import (
"strings"
"time"
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
check "gopkg.in/check.v1"
)
@@ -28,58 +24,11 @@ import (
var _ = check.Suite(&CollectionSuite{})
type CollectionSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
-}
-
-func (s *CollectionSuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
-
-func (s *CollectionSuite) SetUpTest(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
- s.localdb = NewConn(s.cluster)
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
-}
-
-func (s *CollectionSuite) TearDownTest(c *check.C) {
- s.railsSpy.Close()
-}
-
-func (s *CollectionSuite) setUpVocabulary(c *check.C, testVocabulary string) {
- if testVocabulary == "" {
- testVocabulary = `{
- "strict_tags": false,
- "tags": {
- "IDTAGIMPORTANCES": {
- "strict": true,
- "labels": [{"label": "Importance"}, {"label": "Priority"}],
- "values": {
- "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
- "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
- "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
- }
- }
- }
- }`
- }
- voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
- c.Assert(err, check.IsNil)
- s.cluster.API.VocabularyPath = "foo"
- s.localdb.vocabularyCache = voc
+ localdbSuite
}
func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -95,7 +44,7 @@ func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C
c.Log(c.TestName()+" ", tt.name)
// Create with properties
- coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+ coll, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
"properties": tt.props,
@@ -108,9 +57,9 @@ func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C
}
// Create, then update with properties
- coll, err = s.localdb.CollectionCreate(ctx, arvados.CreateOptions{})
+ coll, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{})
c.Assert(err, check.IsNil)
- coll, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ coll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: coll.UUID,
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
@@ -126,33 +75,31 @@ func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C
}
func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.AdminToken}})
- foo, err := s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{
+ adminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
+ foo, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(ctx, &foo)
- foobarbaz, err := s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{
+ s.localdb.signCollection(adminctx, &foo)
+ foobarbaz, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": "./foo/bar 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(ctx, &foobarbaz)
- wazqux, err := s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{
+ s.localdb.signCollection(adminctx, &foobarbaz)
+ wazqux, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"manifest_text": "./waz d85b1213473c2fd7c2045020a6b9c62b+3 0:3:qux.txt\n",
}})
c.Assert(err, check.IsNil)
- s.localdb.signCollection(ctx, &wazqux)
-
- ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+ s.localdb.signCollection(adminctx, &wazqux)
// Create using content from existing collections
- dst, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+ dst, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
ReplaceFiles: map[string]string{
"/f": foo.PortableDataHash + "/foo.txt",
"/b": foobarbaz.PortableDataHash + "/foo/bar",
@@ -166,7 +113,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
s.expectFiles(c, dst, "f", "b/baz.txt", "q/waz/qux.txt", "w/qux.txt")
// Delete a file and a directory
- dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ dst, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: dst.UUID,
ReplaceFiles: map[string]string{
"/f": "",
@@ -176,7 +123,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
s.expectFiles(c, dst, "b/baz.txt", "q/", "w/qux.txt")
// Move and copy content within collection
- dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ dst, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: dst.UUID,
ReplaceFiles: map[string]string{
// Note splicing content to /b/corge.txt but
@@ -189,7 +136,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
s.expectFiles(c, dst, "b/corge.txt", "q/", "w/qux.txt", "quux/corge.txt")
// Remove everything except one file
- dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ dst, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: dst.UUID,
ReplaceFiles: map[string]string{
"/": "",
@@ -199,7 +146,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
s.expectFiles(c, dst, "b/corge.txt")
// Copy entire collection to root
- dstcopy, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+ dstcopy, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
ReplaceFiles: map[string]string{
"/": dst.PortableDataHash,
}})
@@ -237,7 +184,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
{"/bad": "bad/b"},
{"/bad": dst.UUID + "/b"},
} {
- _, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ _, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: dst.UUID,
ReplaceFiles: badrepl,
})
@@ -246,7 +193,7 @@ func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) {
}
// Check conflicting replace_files and manifest_text
- _, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ _, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: dst.UUID,
ReplaceFiles: map[string]string{"/": ""},
Attrs: map[string]interface{}{
@@ -265,7 +212,7 @@ func (s *CollectionSuite) expectFiles(c *check.C, coll arvados.Collection, expec
c.Assert(err, check.IsNil)
kc, err := keepclient.MakeKeepClient(ac)
c.Assert(err, check.IsNil)
- cfs, err := coll.FileSystem(arvados.NewClientFromEnv(), kc)
+ cfs, err := coll.FileSystem(client, kc)
c.Assert(err, check.IsNil)
var found []string
nonemptydirs := map[string]bool{}
@@ -300,18 +247,16 @@ func (s *CollectionSuite) expectFiles(c *check.C, coll arvados.Collection, expec
}
func (s *CollectionSuite) TestSignatures(c *check.C) {
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
-
- resp, err := s.localdb.CollectionGet(ctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
+ resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
c.Check(err, check.IsNil)
c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
- resp, err = s.localdb.CollectionGet(ctx, arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{"manifest_text"}})
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{"manifest_text"}})
c.Check(err, check.IsNil)
c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
- lresp, err := s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}})
+ lresp, err := s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}})
c.Check(err, check.IsNil)
if c.Check(lresp.Items, check.HasLen, 1) {
c.Check(lresp.Items[0].UUID, check.Equals, arvadostest.FooCollection)
@@ -319,14 +264,14 @@ func (s *CollectionSuite) TestSignatures(c *check.C) {
c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
}
- lresp, err = s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"manifest_text"}})
+ lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"manifest_text"}})
c.Check(err, check.IsNil)
if c.Check(lresp.Items, check.HasLen, 1) {
c.Check(lresp.Items[0].ManifestText, check.Matches, `(?ms).* acbd[^ ]*\+3\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)
c.Check(lresp.Items[0].UnsignedManifestText, check.Equals, "")
}
- lresp, err = s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"unsigned_manifest_text"}})
+ lresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}}, Select: []string{"unsigned_manifest_text"}})
c.Check(err, check.IsNil)
if c.Check(lresp.Items, check.HasLen, 1) {
c.Check(lresp.Items[0].ManifestText, check.Equals, "")
@@ -335,7 +280,7 @@ func (s *CollectionSuite) TestSignatures(c *check.C) {
// early trash date causes lower signature TTL (even if
// trash_at and is_trashed fields are unselected)
- trashed, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+ trashed, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
Select: []string{"uuid", "manifest_text"},
Attrs: map[string]interface{}{
"manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
@@ -343,25 +288,25 @@ func (s *CollectionSuite) TestSignatures(c *check.C) {
}})
c.Assert(err, check.IsNil)
s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour)
- resp, err = s.localdb.CollectionGet(ctx, arvados.GetOptions{UUID: trashed.UUID})
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
c.Assert(err, check.IsNil)
s.checkSignatureExpiry(c, resp.ManifestText, time.Hour)
// distant future trash date does not cause higher signature TTL
- trashed, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+ trashed, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{
UUID: trashed.UUID,
Attrs: map[string]interface{}{
"trash_at": time.Now().UTC().Add(time.Hour * 24 * 365),
}})
c.Assert(err, check.IsNil)
s.checkSignatureExpiry(c, trashed.ManifestText, time.Hour*24*7*2)
- resp, err = s.localdb.CollectionGet(ctx, arvados.GetOptions{UUID: trashed.UUID})
+ resp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})
c.Assert(err, check.IsNil)
s.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)
// Make sure groups/contents doesn't return manifest_text with
// collections (if it did, we'd need to sign it).
- gresp, err := s.localdb.GroupContents(ctx, arvados.GroupContentsOptions{
+ gresp, err := s.localdb.GroupContents(s.userctx, arvados.GroupContentsOptions{
Limit: -1,
Filters: []arvados.Filter{{"uuid", "=", arvadostest.FooCollection}},
Select: []string{"uuid", "manifest_text"},
@@ -386,9 +331,7 @@ func (s *CollectionSuite) checkSignatureExpiry(c *check.C, manifestText string,
func (s *CollectionSuite) TestSignaturesDisabled(c *check.C) {
s.localdb.cluster.Collections.BlobSigning = false
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
-
- resp, err := s.localdb.CollectionGet(ctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
+ resp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})
c.Check(err, check.IsNil)
c.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ +]*\+3 0:.*`)
}
diff --git a/lib/controller/localdb/conn.go b/lib/controller/localdb/conn.go
index a36822ad6b..6ab9e1450b 100644
--- a/lib/controller/localdb/conn.go
+++ b/lib/controller/localdb/conn.go
@@ -8,9 +8,9 @@ import (
"context"
"encoding/json"
"fmt"
+ "net"
"net/http"
"os"
- "strings"
"sync"
"time"
@@ -20,6 +20,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
"github.com/hashicorp/yamux"
+ "github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
)
@@ -28,23 +29,32 @@ type railsProxy = rpc.Conn
type Conn struct {
cluster *arvados.Cluster
*railsProxy // handles API methods that aren't defined on Conn itself
+ getdb func(context.Context) (*sqlx.DB, error)
vocabularyCache *arvados.Vocabulary
vocabularyFileModTime time.Time
lastVocabularyRefreshCheck time.Time
lastVocabularyError error
loginController
- gwTunnels map[string]*yamux.Session
- gwTunnelsLock sync.Mutex
+ gwTunnels map[string]*yamux.Session
+ gwTunnelsLock sync.Mutex
+ activeUsers map[string]bool
+ activeUsersLock sync.Mutex
+ activeUsersReset time.Time
+
+ wantContainerPriorityUpdate chan struct{}
}
-func NewConn(cluster *arvados.Cluster) *Conn {
+func NewConn(bgCtx context.Context, cluster *arvados.Cluster, getdb func(context.Context) (*sqlx.DB, error)) *Conn {
railsProxy := railsproxy.NewConn(cluster)
railsProxy.RedactHostInErrors = true
conn := Conn{
- cluster: cluster,
- railsProxy: railsProxy,
+ cluster: cluster,
+ railsProxy: railsProxy,
+ getdb: getdb,
+ wantContainerPriorityUpdate: make(chan struct{}, 1),
}
conn.loginController = chooseLoginController(cluster, &conn)
+ go conn.runContainerPriorityUpdateThread(bgCtx)
return &conn
}
@@ -163,53 +173,25 @@ func (conn *Conn) UserAuthenticate(ctx context.Context, opts arvados.UserAuthent
return conn.loginController.UserAuthenticate(ctx, opts)
}
-func (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {
- // The requested UUID can be a user (virtual home project), which we just pass on to
- // the API server.
- if strings.Index(options.UUID, "-j7d0g-") != 5 {
- return conn.railsProxy.GroupContents(ctx, options)
- }
-
- var resp arvados.ObjectList
-
- // Get the group object
- respGroup, err := conn.GroupGet(ctx, arvados.GetOptions{UUID: options.UUID})
- if err != nil {
- return resp, err
- }
-
- // If the group has groupClass 'filter', apply the filters before getting the contents.
- if respGroup.GroupClass == "filter" {
- if filters, ok := respGroup.Properties["filters"].([]interface{}); ok {
- for _, f := range filters {
- // f is supposed to be a []string
- tmp, ok2 := f.([]interface{})
- if !ok2 || len(tmp) < 3 {
- return resp, fmt.Errorf("filter unparsable: %T, %+v, original field: %T, %+v\n", tmp, tmp, f, f)
- }
- var filter arvados.Filter
- if attr, ok2 := tmp[0].(string); ok2 {
- filter.Attr = attr
- } else {
- return resp, fmt.Errorf("filter unparsable: attribute must be string: %T, %+v, filter: %T, %+v\n", tmp[0], tmp[0], f, f)
- }
- if operator, ok2 := tmp[1].(string); ok2 {
- filter.Operator = operator
- } else {
- return resp, fmt.Errorf("filter unparsable: operator must be string: %T, %+v, filter: %T, %+v\n", tmp[1], tmp[1], f, f)
- }
- filter.Operand = tmp[2]
- options.Filters = append(options.Filters, filter)
- }
- } else {
- return resp, fmt.Errorf("filter unparsable: not an array\n")
+var privateNetworks = func() (nets []*net.IPNet) {
+ for _, s := range []string{
+ "127.0.0.0/8",
+ "10.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "169.254.0.0/16",
+ "::1/128",
+ "fe80::/10",
+ "fc00::/7",
+ } {
+ _, n, err := net.ParseCIDR(s)
+ if err != nil {
+ panic(fmt.Sprintf("privateNetworks: %q: %s", s, err))
}
- // Use the generic /groups/contents endpoint for filter groups
- options.UUID = ""
+ nets = append(nets, n)
}
-
- return conn.railsProxy.GroupContents(ctx, options)
-}
+ return
+}()
func httpErrorf(code int, format string, args ...interface{}) error {
return httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)
diff --git a/lib/controller/localdb/container.go b/lib/controller/localdb/container.go
new file mode 100644
index 0000000000..da2e16e703
--- /dev/null
+++ b/lib/controller/localdb/container.go
@@ -0,0 +1,134 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/sirupsen/logrus"
+)
+
+// ContainerUpdate defers to railsProxy and then notifies the
+// container priority updater thread.
+func (conn *Conn) ContainerUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Container, error) {
+ resp, err := conn.railsProxy.ContainerUpdate(ctx, opts)
+ if err == nil {
+ select {
+ case conn.wantContainerPriorityUpdate <- struct{}{}:
+ default:
+ // update already pending
+ }
+ }
+ return resp, err
+}
+
+var containerPriorityUpdateInterval = 5 * time.Minute
+
+// runContainerPriorityUpdateThread periodically (and immediately
+// after each container update request) corrects any inconsistent
+// container priorities caused by races.
+func (conn *Conn) runContainerPriorityUpdateThread(ctx context.Context) {
+ ctx = ctrlctx.NewWithToken(ctx, conn.cluster, conn.cluster.SystemRootToken)
+ log := ctxlog.FromContext(ctx).WithField("worker", "runContainerPriorityUpdateThread")
+ ticker := time.NewTicker(containerPriorityUpdateInterval)
+ for ctx.Err() == nil {
+ select {
+ case <-ticker.C:
+ case <-conn.wantContainerPriorityUpdate:
+ case <-ctx.Done():
+ return
+ }
+ err := conn.containerPriorityUpdate(ctx, log)
+ if err != nil {
+ log.WithError(err).Warn("error updating container priorities")
+ }
+ }
+}
+
+func (conn *Conn) containerPriorityUpdate(ctx context.Context, log logrus.FieldLogger) error {
+ db, err := conn.getdb(ctx)
+ if err != nil {
+ return fmt.Errorf("getdb: %w", err)
+ }
+ // Stage 1: Fix containers that have priority>0 but should
+ // have priority=0 because there are no active
+ // container_requests (unfinished, priority>0) associated with
+ // them.
+ res, err := db.ExecContext(ctx, `
+ UPDATE containers
+ SET priority=0
+ WHERE state IN ('Queued', 'Locked', 'Running')
+ AND priority>0
+ AND uuid NOT IN (
+ SELECT container_uuid
+ FROM container_requests
+ WHERE priority > 0
+ AND state = 'Committed')`)
+ if err != nil {
+ return fmt.Errorf("update: %w", err)
+ } else if rows, err := res.RowsAffected(); err != nil {
+ return fmt.Errorf("update: %w", err)
+ } else if rows > 0 {
+ log.Infof("found %d containers with priority>0 and no active requests, updated to priority=0", rows)
+ }
+
+ // Stage 2: Fix containers that have priority=0 but should
+ // have priority>0 because there are active container_requests
+ // (priority>0, unfinished, and not children of cancelled
+ // containers).
+ //
+ // Fixing here means calling out to RailsAPI to compute the
+	// correct priority for the container and (if needed)
+ // propagate that change to child containers.
+
+ // In this loop we look for a single container that needs
+ // fixing, call out to Rails to fix it, and repeat until we
+ // don't find any more.
+ //
+ // We could get a batch of UUIDs that need attention by
+ // increasing LIMIT 1, however, updating priority on one
+ // container typically cascades to other containers, so we
+ // would often end up repeating work.
+ for lastUUID := ""; ; {
+ var uuid string
+ err := db.QueryRowxContext(ctx, `
+ SELECT containers.uuid from containers
+ JOIN container_requests
+ ON container_requests.container_uuid = containers.uuid
+ AND container_requests.state = 'Committed' AND container_requests.priority > 0
+ LEFT JOIN containers parent
+ ON parent.uuid = container_requests.requesting_container_uuid
+ WHERE containers.state IN ('Queued', 'Locked', 'Running')
+ AND containers.priority = 0
+ AND (parent.uuid IS NULL OR parent.priority > 0)
+ ORDER BY containers.created_at
+ LIMIT 1`).Scan(&uuid)
+ if err == sql.ErrNoRows {
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("join: %w", err)
+ }
+ if uuid == lastUUID {
+ // We don't want to keep hammering this
+ // forever if the ContainerPriorityUpdate call
+ // didn't achieve anything.
+ return fmt.Errorf("possible lack of progress: container %s still has priority=0 after updating", uuid)
+ }
+ lastUUID = uuid
+ upd, err := conn.railsProxy.ContainerPriorityUpdate(ctx, arvados.UpdateOptions{UUID: uuid, Select: []string{"uuid", "priority"}})
+ if err != nil {
+ return err
+ }
+ log.Debugf("updated container %s priority from 0 to %d", uuid, upd.Priority)
+ }
+ return nil
+}
diff --git a/lib/controller/localdb/container_gateway.go b/lib/controller/localdb/container_gateway.go
index 77c5182e9c..0b6a630fae 100644
--- a/lib/controller/localdb/container_gateway.go
+++ b/lib/controller/localdb/container_gateway.go
@@ -19,16 +19,21 @@ import (
"io/ioutil"
"net"
"net/http"
+ "net/http/httputil"
"net/url"
+ "os"
"strings"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/service"
+ "git.arvados.org/arvados.git/lib/webdavfs"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
+ keepweb "git.arvados.org/arvados.git/services/keep-web"
"github.com/hashicorp/yamux"
+ "golang.org/x/net/webdav"
)
var (
@@ -36,6 +41,298 @@ var (
forceInternalURLForTest *arvados.URL
)
+// ContainerRequestLog returns a WebDAV handler that reads logs from
+// the indicated container request. It works by proxying the incoming
+// HTTP request to
+//
+// - the container gateway, if there is an associated container that
+// is running
+//
+// - a different controller process, if there is a running container
+// whose gateway is accessible through a tunnel to a different
+// controller process
+//
+// - keep-web, if saved logs exist and there is no gateway (or the
+// associated container is finished)
+//
+// - an empty-collection stub, if there is no gateway and no saved
+// log
+//
+// For an incoming request
+//
+// GET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}
+//
+// The upstream request may be to {c_uuid}'s container gateway
+//
+// GET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}
+// X-Webdav-Prefix: /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}
+// X-Webdav-Source: /log
+//
+// ...or the upstream request may be to keep-web (where {cr_log_uuid}
+// is the container request log collection UUID)
+//
+// GET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}
+// Host: {cr_log_uuid}.internal
+// X-Webdav-Prefix: /arvados/v1/container_requests/{cr_uuid}/log
+// X-Arvados-Container-Uuid: {c_uuid}
+//
+// ...or the request may be handled locally using an empty-collection
+// stub.
+func (conn *Conn) ContainerRequestLog(ctx context.Context, opts arvados.ContainerLogOptions) (http.Handler, error) {
+ if opts.Method == "OPTIONS" && opts.Header.Get("Access-Control-Request-Method") != "" {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !keepweb.ServeCORSPreflight(w, opts.Header) {
+ // Inconceivable. We already checked
+ // for the only condition where
+ // ServeCORSPreflight returns false.
+ httpserver.Error(w, "unhandled CORS preflight request", http.StatusInternalServerError)
+ }
+ }), nil
+ }
+ cr, err := conn.railsProxy.ContainerRequestGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{"uuid", "container_uuid", "log_uuid"}})
+ if err != nil {
+ if se := httpserver.HTTPStatusError(nil); errors.As(err, &se) && se.HTTPStatus() == http.StatusUnauthorized {
+ // Hint to WebDAV client that we accept HTTP basic auth.
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Www-Authenticate", "Basic realm=\"collections\"")
+ w.WriteHeader(http.StatusUnauthorized)
+ }), nil
+ }
+ return nil, err
+ }
+ ctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: cr.ContainerUUID, Select: []string{"uuid", "state", "gateway_address"}})
+ if err != nil {
+ return nil, err
+ }
+ // .../log/{ctr.UUID} is a directory where the currently
+ // assigned container's log data [will] appear (as opposed to
+ // previous attempts in .../log/{previous_ctr_uuid}). Requests
+ // that are outside that directory, and requests on a
+ // non-running container, are proxied to keep-web instead of
+ // going through the container gateway system.
+ //
+ // Side note: a depth>1 directory tree listing starting at
+ // .../{cr_uuid}/log will only include subdirectories for
+ // finished containers, i.e., will not include a subdirectory
+ // with log data for a current (unfinished) container UUID.
+ // In order to access live logs, a client must look up the
+ // container_uuid field of the container request record, and
+ // explicitly request a path under .../{cr_uuid}/log/{c_uuid}.
+ if ctr.GatewayAddress == "" ||
+ (ctr.State != arvados.ContainerStateLocked && ctr.State != arvados.ContainerStateRunning) ||
+ !(opts.Path == "/"+ctr.UUID || strings.HasPrefix(opts.Path, "/"+ctr.UUID+"/")) {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ conn.serveContainerRequestLogViaKeepWeb(opts, cr, w, r)
+ }), nil
+ }
+ dial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)
+ if err != nil {
+ return nil, err
+ }
+ if arpc != nil {
+ opts.NoForward = true
+ return arpc.ContainerRequestLog(ctx, opts)
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(ctx)
+ var proxyReq *http.Request
+ var proxyErr error
+ var expectRespondAuth string
+ proxy := &httputil.ReverseProxy{
+ // Our custom Transport:
+ //
+ // - Uses a custom dialer to connect to the
+ // gateway (either directly or through a
+ // tunnel set up though ContainerTunnel)
+ //
+ // - Verifies the gateway's TLS certificate
+ // using X-Arvados-Authorization headers.
+ //
+ // This involves modifying the outgoing
+ // request header in DialTLSContext.
+ // (ReverseProxy certainly doesn't expect us
+ // to do this, but it works.)
+ Transport: &http.Transport{
+ DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ tlsconn, requestAuth, respondAuth, err := dial()
+ if err != nil {
+ return nil, err
+ }
+ proxyReq.Header.Set("X-Arvados-Authorization", requestAuth)
+ expectRespondAuth = respondAuth
+ return tlsconn, nil
+ },
+ },
+ Director: func(r *http.Request) {
+ // Scheme/host of incoming r.URL are
+ // irrelevant now, and may even be
+ // missing. Host is ignored by our
+ // DialTLSContext, but we need a
+ // generic syntactically correct URL
+ // for net/http to work with.
+ r.URL.Scheme = "https"
+ r.URL.Host = "0.0.0.0:0"
+ r.Header.Set("X-Arvados-Container-Gateway-Uuid", ctr.UUID)
+ r.Header.Set("X-Webdav-Prefix", "/arvados/v1/container_requests/"+cr.UUID+"/log/"+ctr.UUID)
+ r.Header.Set("X-Webdav-Source", "/log")
+ proxyReq = r
+ },
+ ModifyResponse: func(resp *http.Response) error {
+ if resp.Header.Get("X-Arvados-Authorization-Response") != expectRespondAuth {
+ // Note this is how we detect
+ // an attacker-in-the-middle.
+ return httpserver.ErrorWithStatus(errors.New("bad X-Arvados-Authorization-Response header"), http.StatusBadGateway)
+ }
+ resp.Header.Del("X-Arvados-Authorization-Response")
+ preemptivelyDeduplicateHeaders(w.Header(), resp.Header)
+ return nil
+ },
+ ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
+ proxyErr = err
+ },
+ }
+ proxy.ServeHTTP(w, r)
+ if proxyErr == nil {
+ // proxy succeeded
+ return
+ }
+ // If proxying to the container gateway fails, it
+ // might be caused by a race where crunch-run exited
+ // after we decided (above) the log was not final.
+ // In that case we should proxy to keep-web.
+ ctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{
+ UUID: ctr.UUID,
+ Select: []string{"uuid", "state", "gateway_address", "log"},
+ })
+ if err != nil {
+ // Lost access to the container record?
+ httpserver.Error(w, "error re-fetching container record: "+err.Error(), http.StatusServiceUnavailable)
+ } else if ctr.State == arvados.ContainerStateLocked || ctr.State == arvados.ContainerStateRunning {
+ // No race, proxyErr was the best we can do
+ httpserver.Error(w, "proxy error: "+proxyErr.Error(), http.StatusServiceUnavailable)
+ } else {
+ conn.serveContainerRequestLogViaKeepWeb(opts, cr, w, r)
+ }
+ }), nil
+}
+
+// serveContainerRequestLogViaKeepWeb handles a request for saved
+// container log content by proxying to one of the configured keep-web servers.
+//
+// It tries to choose a keep-web server that is running on this host.
+func (conn *Conn) serveContainerRequestLogViaKeepWeb(opts arvados.ContainerLogOptions, cr arvados.ContainerRequest, w http.ResponseWriter, r *http.Request) {
+ if cr.LogUUID == "" {
+ // Special case: if no log data exists yet, we serve
+ // an empty collection by ourselves instead of
+ // proxying to keep-web.
+ conn.serveEmptyDir("/arvados/v1/container_requests/"+cr.UUID+"/log", w, r)
+ return
+ }
+ myURL, _ := service.URLFromContext(r.Context())
+ u := url.URL(myURL)
+ myHostname := u.Hostname()
+ var webdavBase arvados.URL
+ var ok bool
+ for webdavBase = range conn.cluster.Services.WebDAV.InternalURLs {
+ ok = true
+ u := url.URL(webdavBase)
+ if h := u.Hostname(); h == "127.0.0.1" || h == "0.0.0.0" || h == "::1" || h == myHostname {
+ // Prefer a keep-web service running on the
+ // same host as us. (If we don't find one, we
+ // pick one arbitrarily.)
+ break
+ }
+ }
+ if !ok {
+ httpserver.Error(w, "no internalURLs configured for WebDAV service", http.StatusInternalServerError)
+ return
+ }
+ proxy := &httputil.ReverseProxy{
+ Director: func(r *http.Request) {
+ r.URL.Scheme = webdavBase.Scheme
+ r.URL.Host = webdavBase.Host
+ // Outgoing Host header specifies the
+ // collection ID.
+ r.Host = cr.LogUUID + ".internal"
+ // We already checked permission on the
+ // container, so we can use a root token here
+ // instead of counting on the "access to log
+ // via container request and container"
+ // permission check, which can be racy when a
+ // request gets retried with a new container.
+ r.Header.Set("Authorization", "Bearer "+conn.cluster.SystemRootToken)
+ // We can't change r.URL.Path without
+ // confusing WebDAV (request body and response
+ // headers refer to the same paths) so we tell
+ // keep-web to map the log collection onto the
+ // containers/X/log/ namespace.
+ r.Header.Set("X-Webdav-Prefix", "/arvados/v1/container_requests/"+cr.UUID+"/log")
+ if len(opts.Path) >= 28 && opts.Path[6:13] == "-dz642-" {
+ // "/arvados/v1/container_requests/{crUUID}/log/{cUUID}..."
+ // proxies to
+ // "/log for container {cUUID}..."
+ r.Header.Set("X-Webdav-Prefix", "/arvados/v1/container_requests/"+cr.UUID+"/log/"+opts.Path[1:28])
+ r.Header.Set("X-Webdav-Source", "/log for container "+opts.Path[1:28]+"/")
+ }
+ },
+ ModifyResponse: func(resp *http.Response) error {
+ preemptivelyDeduplicateHeaders(w.Header(), resp.Header)
+ return nil
+ },
+ }
+ if conn.cluster.TLS.Insecure {
+ proxy.Transport = &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: conn.cluster.TLS.Insecure,
+ },
+ }
+ }
+ proxy.ServeHTTP(w, r)
+}
+
+// httputil.ReverseProxy uses (http.Header)Add() to copy headers from
+// the upstream Response to the downstream ResponseWriter. If headers
+// have already been set on the downstream ResponseWriter, Add() will
+// result in duplicate headers. For example, if we set CORS headers
+// and then use ReverseProxy with an upstream that also sets CORS
+// headers, our client will receive
+//
+// Access-Control-Allow-Origin: *
+// Access-Control-Allow-Origin: *
+//
+// ...which is incorrect.
+//
+// preemptivelyDeduplicateHeaders, when called from a ModifyResponse
+// hook, solves this by removing any conflicting headers from
+// ResponseWriter. This way, when ReverseProxy calls Add(), it will
+// assign the new values without causing duplicates.
+//
+// dst is the downstream ResponseWriter's Header(). src is the
+// upstream resp.Header.
+func preemptivelyDeduplicateHeaders(dst, src http.Header) {
+ for hdr := range src {
+ dst.Del(hdr)
+ }
+}
+
+// serveEmptyDir handles read-only webdav requests as if there was an
+// empty collection rooted at the given path. It's equivalent to
+// proxying to an empty collection in keep-web, but avoids the extra
+// hop.
+func (conn *Conn) serveEmptyDir(path string, w http.ResponseWriter, r *http.Request) {
+ wh := webdav.Handler{
+ Prefix: path,
+ FileSystem: webdav.NewMemFS(),
+ LockSystem: webdavfs.NoLockSystem,
+ Logger: func(r *http.Request, err error) {
+ if err != nil && !os.IsNotExist(err) {
+ ctxlog.FromContext(r.Context()).WithError(err).Info("webdav error on empty collection fs")
+ }
+ },
+ }
+ wh.ServeHTTP(w, r)
+}
+
// ContainerSSH returns a connection to the SSH server in the
// appropriate crunch-run process on the worker node where the
// specified container is running.
@@ -47,7 +344,7 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
if err != nil {
return sshconn, err
}
- ctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: opts.UUID})
+ ctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{"uuid", "state", "gateway_address", "interactive_session_started"}})
if err != nil {
return sshconn, err
}
@@ -70,138 +367,36 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
}
}
- conn.gwTunnelsLock.Lock()
- tunnel := conn.gwTunnels[opts.UUID]
- conn.gwTunnelsLock.Unlock()
-
if ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {
return sshconn, httpserver.ErrorWithStatus(fmt.Errorf("container is not running yet (state is %q)", ctr.State), http.StatusServiceUnavailable)
} else if ctr.State != arvados.ContainerStateRunning {
return sshconn, httpserver.ErrorWithStatus(fmt.Errorf("container has ended (state is %q)", ctr.State), http.StatusGone)
}
- // targetHost is the value we'll use in the Host header in our
- // "Upgrade: ssh" http request. It's just a placeholder
- // "localhost", unless we decide to connect directly, in which
- // case we'll set it to the gateway's external ip:host. (The
- // gateway doesn't even look at it, but we might as well.)
- targetHost := "localhost"
- myURL, _ := service.URLFromContext(ctx)
-
- var rawconn net.Conn
- if host, _, splitErr := net.SplitHostPort(ctr.GatewayAddress); splitErr == nil && host != "" && host != "127.0.0.1" {
- // If crunch-run provided a GatewayAddress like
- // "ipaddr:port", that means "ipaddr" is one of the
- // external interfaces where the gateway is
- // listening. In that case, it's the most
- // reliable/direct option, so we use it even if a
- // tunnel might also be available.
- targetHost = ctr.GatewayAddress
- rawconn, err = net.Dial("tcp", ctr.GatewayAddress)
- if err != nil {
- return sshconn, httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)
- }
- } else if tunnel != nil && !(forceProxyForTest && !opts.NoForward) {
- // If we can't connect directly, and the gateway has
- // established a yamux tunnel with us, connect through
- // the tunnel.
- //
- // ...except: forceProxyForTest means we are emulating
- // a situation where the gateway has established a
- // yamux tunnel with controller B, and the
- // ContainerSSH request arrives at controller A. If
- // opts.NoForward==false then we are acting as A, so
- // we pretend not to have a tunnel, and fall through
- // to the "tunurl" case below. If opts.NoForward==true
- // then the client is A and we are acting as B, so we
- // connect to our tunnel.
- rawconn, err = tunnel.Open()
- if err != nil {
- return sshconn, httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)
- }
- } else if ctr.GatewayAddress == "" {
- return sshconn, httpserver.ErrorWithStatus(errors.New("container is running but gateway is not available"), http.StatusServiceUnavailable)
- } else if tunurl := strings.TrimPrefix(ctr.GatewayAddress, "tunnel "); tunurl != ctr.GatewayAddress &&
- tunurl != "" &&
- tunurl != myURL.String() &&
- !opts.NoForward {
- // If crunch-run provided a GatewayAddress like
- // "tunnel https://10.0.0.10:1010/", that means the
- // gateway has established a yamux tunnel with the
- // controller process at the indicated InternalURL
- // (which isn't us, otherwise we would have had
- // "tunnel != nil" above). We need to proxy through to
- // the other controller process in order to use the
- // tunnel.
- for u := range conn.cluster.Services.Controller.InternalURLs {
- if u.String() == tunurl {
- ctxlog.FromContext(ctx).Debugf("proxying ContainerSSH request to other controller at %s", u)
- u := url.URL(u)
- arpc := rpc.NewConn(conn.cluster.ClusterID, &u, conn.cluster.TLS.Insecure, rpc.PassthroughTokenProvider)
- opts.NoForward = true
- return arpc.ContainerSSH(ctx, opts)
- }
- }
- ctxlog.FromContext(ctx).Warnf("container gateway provided a tunnel endpoint %s that is not one of Services.Controller.InternalURLs", tunurl)
- return sshconn, httpserver.ErrorWithStatus(errors.New("container gateway is running but tunnel endpoint is invalid"), http.StatusServiceUnavailable)
- } else {
- return sshconn, httpserver.ErrorWithStatus(errors.New("container gateway is running but tunnel is down"), http.StatusServiceUnavailable)
+ dial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)
+ if err != nil {
+ return sshconn, err
+ }
+ if arpc != nil {
+ opts.NoForward = true
+ return arpc.ContainerSSH(ctx, opts)
}
- // crunch-run uses a self-signed / unverifiable TLS
- // certificate, so we use the following scheme to ensure we're
- // not talking to a MITM.
- //
- // 1. Compute ctrKey = HMAC-SHA256(sysRootToken,ctrUUID) --
- // this will be the same ctrKey that a-d-c supplied to
- // crunch-run in the GatewayAuthSecret env var.
- //
- // 2. Compute requestAuth = HMAC-SHA256(ctrKey,serverCert) and
- // send it to crunch-run as the X-Arvados-Authorization
- // header, proving that we know ctrKey. (Note a MITM cannot
- // replay the proof to a real crunch-run server, because the
- // real crunch-run server would have a different cert.)
- //
- // 3. Compute respondAuth = HMAC-SHA256(ctrKey,requestAuth)
- // and ensure the server returns it in the
- // X-Arvados-Authorization-Response header, proving that the
- // server knows ctrKey.
- var requestAuth, respondAuth string
- tlsconn := tls.Client(rawconn, &tls.Config{
- InsecureSkipVerify: true,
- VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- if len(rawCerts) == 0 {
- return errors.New("no certificate received, cannot compute authorization header")
- }
- h := hmac.New(sha256.New, []byte(conn.cluster.SystemRootToken))
- fmt.Fprint(h, opts.UUID)
- authKey := fmt.Sprintf("%x", h.Sum(nil))
- h = hmac.New(sha256.New, []byte(authKey))
- h.Write(rawCerts[0])
- requestAuth = fmt.Sprintf("%x", h.Sum(nil))
- h.Reset()
- h.Write([]byte(requestAuth))
- respondAuth = fmt.Sprintf("%x", h.Sum(nil))
- return nil
- },
- })
- err = tlsconn.HandshakeContext(ctx)
+ tlsconn, requestAuth, respondAuth, err := dial()
if err != nil {
- return sshconn, httpserver.ErrorWithStatus(fmt.Errorf("TLS handshake failed: %w", err), http.StatusBadGateway)
- }
- if respondAuth == "" {
- tlsconn.Close()
- return sshconn, httpserver.ErrorWithStatus(errors.New("BUG: no respondAuth"), http.StatusInternalServerError)
+ return sshconn, err
}
bufr := bufio.NewReader(tlsconn)
bufw := bufio.NewWriter(tlsconn)
u := url.URL{
Scheme: "http",
- Host: targetHost,
+ Host: tlsconn.RemoteAddr().String(),
Path: "/ssh",
}
postform := url.Values{
+ // uuid is only needed for older crunch-run versions
+ // (current version uses X-Arvados-* header below)
"uuid": {opts.UUID},
"detach_keys": {opts.DetachKeys},
"login_username": {opts.LoginUsername},
@@ -211,6 +406,7 @@ func (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOpt
bufw.WriteString("POST " + u.String() + " HTTP/1.1\r\n")
bufw.WriteString("Host: " + u.Host + "\r\n")
bufw.WriteString("Upgrade: ssh\r\n")
+ bufw.WriteString("X-Arvados-Container-Gateway-Uuid: " + opts.UUID + "\r\n")
bufw.WriteString("X-Arvados-Authorization: " + requestAuth + "\r\n")
bufw.WriteString("Content-Type: application/x-www-form-urlencoded\r\n")
fmt.Fprintf(bufw, "Content-Length: %d\r\n", len(postdata))
@@ -308,3 +504,137 @@ func (conn *Conn) ContainerGatewayTunnel(ctx context.Context, opts arvados.Conta
}
return
}
+
+type gatewayDialer func() (conn net.Conn, requestAuth, respondAuth string, err error)
+
+// findGateway figures out how to connect to ctr's gateway.
+//
+// If the gateway can be contacted directly or through a tunnel on
+// this instance, the first return value is a non-nil dialer.
+//
+// If the gateway is only accessible through a tunnel through a
+// different controller process, the second return value is a non-nil
+// *rpc.Conn for that controller.
+func (conn *Conn) findGateway(ctx context.Context, ctr arvados.Container, noForward bool) (gatewayDialer, *rpc.Conn, error) {
+ conn.gwTunnelsLock.Lock()
+ tunnel := conn.gwTunnels[ctr.UUID]
+ conn.gwTunnelsLock.Unlock()
+
+ myURL, _ := service.URLFromContext(ctx)
+
+ if host, _, splitErr := net.SplitHostPort(ctr.GatewayAddress); splitErr == nil && host != "" && host != "127.0.0.1" {
+ // If crunch-run provided a GatewayAddress like
+ // "ipaddr:port", that means "ipaddr" is one of the
+ // external interfaces where the gateway is
+ // listening. In that case, it's the most
+ // reliable/direct option, so we use it even if a
+ // tunnel might also be available.
+ return func() (net.Conn, string, string, error) {
+ rawconn, err := (&net.Dialer{}).DialContext(ctx, "tcp", ctr.GatewayAddress)
+ if err != nil {
+ return nil, "", "", httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)
+ }
+ return conn.dialGatewayTLS(ctx, ctr, rawconn)
+ }, nil, nil
+ }
+ if tunnel != nil && !(forceProxyForTest && !noForward) {
+ // If we can't connect directly, and the gateway has
+ // established a yamux tunnel with us, connect through
+ // the tunnel.
+ //
+ // ...except: forceProxyForTest means we are emulating
+ // a situation where the gateway has established a
+ // yamux tunnel with controller B, and the
+ // ContainerSSH request arrives at controller A. If
+ // noForward==false then we are acting as A, so
+ // we pretend not to have a tunnel, and fall through
+ // to the "tunurl" case below. If noForward==true
+ // then the client is A and we are acting as B, so we
+ // connect to our tunnel.
+ return func() (net.Conn, string, string, error) {
+ rawconn, err := tunnel.Open()
+ if err != nil {
+ return nil, "", "", httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)
+ }
+ return conn.dialGatewayTLS(ctx, ctr, rawconn)
+ }, nil, nil
+ }
+ if tunurl := strings.TrimPrefix(ctr.GatewayAddress, "tunnel "); tunurl != ctr.GatewayAddress &&
+ tunurl != "" &&
+ tunurl != myURL.String() &&
+ !noForward {
+ // If crunch-run provided a GatewayAddress like
+ // "tunnel https://10.0.0.10:1010/", that means the
+ // gateway has established a yamux tunnel with the
+ // controller process at the indicated InternalURL
+ // (which isn't us, otherwise we would have had
+ // "tunnel != nil" above). We need to proxy through to
+ // the other controller process in order to use the
+ // tunnel.
+ for u := range conn.cluster.Services.Controller.InternalURLs {
+ if u.String() == tunurl {
+ ctxlog.FromContext(ctx).Debugf("connecting to container gateway through other controller at %s", u)
+ u := url.URL(u)
+ return nil, rpc.NewConn(conn.cluster.ClusterID, &u, conn.cluster.TLS.Insecure, rpc.PassthroughTokenProvider), nil
+ }
+ }
+ ctxlog.FromContext(ctx).Warnf("container gateway provided a tunnel endpoint %s that is not one of Services.Controller.InternalURLs", tunurl)
+ return nil, nil, httpserver.ErrorWithStatus(errors.New("container gateway is running but tunnel endpoint is invalid"), http.StatusServiceUnavailable)
+ }
+ if ctr.GatewayAddress == "" {
+ return nil, nil, httpserver.ErrorWithStatus(errors.New("container is running but gateway is not available"), http.StatusServiceUnavailable)
+ } else {
+ return nil, nil, httpserver.ErrorWithStatus(errors.New("container is running but tunnel is down"), http.StatusServiceUnavailable)
+ }
+}
+
+// dialGatewayTLS negotiates a TLS connection to a container gateway
+// over the given raw connection.
+func (conn *Conn) dialGatewayTLS(ctx context.Context, ctr arvados.Container, rawconn net.Conn) (*tls.Conn, string, string, error) {
+ // crunch-run uses a self-signed / unverifiable TLS
+ // certificate, so we use the following scheme to ensure we're
+ // not talking to an attacker-in-the-middle.
+ //
+ // 1. Compute ctrKey = HMAC-SHA256(sysRootToken,ctrUUID) --
+ // this will be the same ctrKey that a-d-c supplied to
+ // crunch-run in the GatewayAuthSecret env var.
+ //
+ // 2. Compute requestAuth = HMAC-SHA256(ctrKey,serverCert) and
+ // send it to crunch-run as the X-Arvados-Authorization
+ // header, proving that we know ctrKey. (Note a MITM cannot
+ // replay the proof to a real crunch-run server, because the
+ // real crunch-run server would have a different cert.)
+ //
+ // 3. Compute respondAuth = HMAC-SHA256(ctrKey,requestAuth)
+ // and ensure the server returns it in the
+ // X-Arvados-Authorization-Response header, proving that the
+ // server knows ctrKey.
+ var requestAuth, respondAuth string
+ tlsconn := tls.Client(rawconn, &tls.Config{
+ InsecureSkipVerify: true,
+ VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+ if len(rawCerts) == 0 {
+ return errors.New("no certificate received, cannot compute authorization header")
+ }
+ h := hmac.New(sha256.New, []byte(conn.cluster.SystemRootToken))
+ fmt.Fprint(h, ctr.UUID)
+ authKey := fmt.Sprintf("%x", h.Sum(nil))
+ h = hmac.New(sha256.New, []byte(authKey))
+ h.Write(rawCerts[0])
+ requestAuth = fmt.Sprintf("%x", h.Sum(nil))
+ h.Reset()
+ h.Write([]byte(requestAuth))
+ respondAuth = fmt.Sprintf("%x", h.Sum(nil))
+ return nil
+ },
+ })
+ err := tlsconn.HandshakeContext(ctx)
+ if err != nil {
+ return nil, "", "", httpserver.ErrorWithStatus(fmt.Errorf("TLS handshake failed: %w", err), http.StatusBadGateway)
+ }
+ if respondAuth == "" {
+ tlsconn.Close()
+ return nil, "", "", httpserver.ErrorWithStatus(errors.New("BUG: no respondAuth"), http.StatusInternalServerError)
+ }
+ return tlsconn, requestAuth, respondAuth, nil
+}
diff --git a/lib/controller/localdb/container_gateway_test.go b/lib/controller/localdb/container_gateway_test.go
index 2c882c7852..0c58a9192c 100644
--- a/lib/controller/localdb/container_gateway_test.go
+++ b/lib/controller/localdb/container_gateway_test.go
@@ -5,6 +5,7 @@
package localdb
import (
+ "bytes"
"context"
"crypto/hmac"
"crypto/sha256"
@@ -12,19 +13,26 @@ import (
"io"
"io/ioutil"
"net"
+ "net/http"
"net/http/httptest"
"net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
"strings"
"time"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/controller/router"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/crunchrun"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
"golang.org/x/crypto/ssh"
check "gopkg.in/check.v1"
)
@@ -32,44 +40,54 @@ import (
var _ = check.Suite(&ContainerGatewaySuite{})
type ContainerGatewaySuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- ctx context.Context
+ localdbSuite
+ reqUUID string
ctrUUID string
+ srv *httptest.Server
gw *crunchrun.Gateway
}
-func (s *ContainerGatewaySuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
+func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
-func (s *ContainerGatewaySuite) SetUpSuite(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
+ cr, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "command": []string{"echo", time.Now().Format(time.RFC3339Nano)},
+ "container_count_max": 1,
+ "container_image": "arvados/apitestfixture:latest",
+ "cwd": "/tmp",
+ "environment": map[string]string{},
+ "output_path": "/out",
+ "priority": 1,
+ "state": arvados.ContainerRequestStateCommitted,
+ "mounts": map[string]interface{}{
+ "/out": map[string]interface{}{
+ "kind": "tmp",
+ "capacity": 1000000,
+ },
+ },
+ "runtime_constraints": map[string]interface{}{
+ "vcpus": 1,
+ "ram": 2,
+ }}})
c.Assert(err, check.IsNil)
- s.localdb = NewConn(s.cluster)
- s.ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
-
- s.ctrUUID = arvadostest.QueuedContainerUUID
+ s.reqUUID = cr.UUID
+ s.ctrUUID = cr.ContainerUUID
h := hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))
fmt.Fprint(h, s.ctrUUID)
authKey := fmt.Sprintf("%x", h.Sum(nil))
rtr := router.New(s.localdb, router.Config{})
- srv := httptest.NewUnstartedServer(rtr)
- srv.StartTLS()
+ s.srv = httptest.NewUnstartedServer(httpserver.AddRequestIDs(httpserver.LogRequests(rtr)))
+ s.srv.StartTLS()
// the test setup doesn't use lib/service so
// service.URLFromContext() returns nothing -- instead, this
// is how we advertise our internal URL and enable
// proxy-to-other-controller mode,
- forceInternalURLForTest = &arvados.URL{Scheme: "https", Host: srv.Listener.Addr().String()}
+ forceInternalURLForTest = &arvados.URL{Scheme: "https", Host: s.srv.Listener.Addr().String()}
ac := &arvados.Client{
- APIHost: srv.Listener.Addr().String(),
+ APIHost: s.srv.Listener.Addr().String(),
AuthToken: arvadostest.Dispatch1Token,
Insecure: true,
}
@@ -82,22 +100,14 @@ func (s *ContainerGatewaySuite) SetUpSuite(c *check.C) {
ArvadosClient: ac,
}
c.Assert(s.gw.Start(), check.IsNil)
- rootctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{s.cluster.SystemRootToken}})
+
+ rootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)
_, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
UUID: s.ctrUUID,
Attrs: map[string]interface{}{
"state": arvados.ContainerStateLocked}})
c.Assert(err, check.IsNil)
-}
-
-func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
- // clear any tunnel sessions started by previous test cases
- s.localdb.gwTunnelsLock.Lock()
- s.localdb.gwTunnels = nil
- s.localdb.gwTunnelsLock.Unlock()
-
- rootctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{s.cluster.SystemRootToken}})
- _, err := s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
+ _, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
UUID: s.ctrUUID,
Attrs: map[string]interface{}{
"state": arvados.ContainerStateRunning,
@@ -106,10 +116,15 @@ func (s *ContainerGatewaySuite) SetUpTest(c *check.C) {
s.cluster.Containers.ShellAccess.Admin = true
s.cluster.Containers.ShellAccess.User = true
- _, err = arvadostest.DB(c, s.cluster).Exec(`update containers set interactive_session_started=$1 where uuid=$2`, false, s.ctrUUID)
+ _, err = s.db.Exec(`update containers set interactive_session_started=$1 where uuid=$2`, false, s.ctrUUID)
c.Check(err, check.IsNil)
}
+func (s *ContainerGatewaySuite) TearDownTest(c *check.C) {
+ s.srv.Close()
+ s.localdbSuite.TearDownTest(c)
+}
+
func (s *ContainerGatewaySuite) TestConfig(c *check.C) {
for _, trial := range []struct {
configAdmin bool
@@ -129,7 +144,7 @@ func (s *ContainerGatewaySuite) TestConfig(c *check.C) {
c.Logf("trial %#v", trial)
s.cluster.Containers.ShellAccess.Admin = trial.configAdmin
s.cluster.Containers.ShellAccess.User = trial.configUser
- ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{trial.sendToken}})
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, trial.sendToken)
sshconn, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
if trial.errorCode == 0 {
if !c.Check(err, check.IsNil) {
@@ -175,7 +190,7 @@ func (s *ContainerGatewaySuite) TestDirectTCP(c *check.C) {
}
c.Logf("connecting to %s", s.gw.Address)
- sshconn, err := s.localdb.ContainerSSH(s.ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
+ sshconn, err := s.localdb.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Assert(err, check.IsNil)
c.Assert(sshconn.Conn, check.NotNil)
defer sshconn.Conn.Close()
@@ -211,9 +226,302 @@ func (s *ContainerGatewaySuite) TestDirectTCP(c *check.C) {
}
}
+func (s *ContainerGatewaySuite) setupLogCollection(c *check.C) {
+ files := map[string]string{
+ "stderr.txt": "hello world\n",
+ "a/b/c/d.html": "\n",
+ }
+ client := arvados.NewClientFromEnv()
+ ac, err := arvadosclient.New(client)
+ c.Assert(err, check.IsNil)
+ kc, err := keepclient.MakeKeepClient(ac)
+ c.Assert(err, check.IsNil)
+ cfs, err := (&arvados.Collection{}).FileSystem(client, kc)
+ c.Assert(err, check.IsNil)
+ for name, content := range files {
+ for i, ch := range name {
+ if ch == '/' {
+ err := cfs.Mkdir("/"+name[:i], 0777)
+ c.Assert(err, check.IsNil)
+ }
+ }
+ f, err := cfs.OpenFile("/"+name, os.O_CREATE|os.O_WRONLY, 0777)
+ c.Assert(err, check.IsNil)
+ f.Write([]byte(content))
+ err = f.Close()
+ c.Assert(err, check.IsNil)
+ }
+ cfs.Sync()
+ s.gw.LogCollection = cfs
+}
+
+func (s *ContainerGatewaySuite) saveLogAndCloseGateway(c *check.C) {
+ rootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)
+ txt, err := s.gw.LogCollection.MarshalManifest(".")
+ c.Assert(err, check.IsNil)
+ coll, err := s.localdb.CollectionCreate(rootctx, arvados.CreateOptions{
+ Attrs: map[string]interface{}{
+ "manifest_text": txt,
+ }})
+ c.Assert(err, check.IsNil)
+ _, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
+ UUID: s.ctrUUID,
+ Attrs: map[string]interface{}{
+ "state": arvados.ContainerStateComplete,
+ "exit_code": 0,
+ "log": coll.PortableDataHash,
+ }})
+ c.Assert(err, check.IsNil)
+ updatedReq, err := s.localdb.ContainerRequestGet(rootctx, arvados.GetOptions{UUID: s.reqUUID})
+ c.Assert(err, check.IsNil)
+ c.Logf("container request log UUID is %s", updatedReq.LogUUID)
+ crLog, err := s.localdb.CollectionGet(rootctx, arvados.GetOptions{UUID: updatedReq.LogUUID, Select: []string{"manifest_text"}})
+ c.Assert(err, check.IsNil)
+ c.Logf("collection log manifest:\n%s", crLog.ManifestText)
+ // Ensure localdb can't circumvent the keep-web proxy test by
+ // getting content from the container gateway.
+ s.gw.LogCollection = nil
+}
+
+func (s *ContainerGatewaySuite) TestContainerRequestLogViaTunnel(c *check.C) {
+ forceProxyForTest = true
+ defer func() { forceProxyForTest = false }()
+
+ s.gw = s.setupGatewayWithTunnel(c)
+ s.setupLogCollection(c)
+
+ for _, broken := range []bool{false, true} {
+ c.Logf("broken=%v", broken)
+
+ if broken {
+ delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
+ } else {
+ s.cluster.Services.Controller.InternalURLs[*forceInternalURLForTest] = arvados.ServiceInstance{}
+ defer delete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)
+ }
+
+ r, err := http.NewRequestWithContext(s.userctx, "GET", "https://controller.example/arvados/v1/container_requests/"+s.reqUUID+"/log/"+s.ctrUUID+"/stderr.txt", nil)
+ c.Assert(err, check.IsNil)
+ r.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+ handler, err := s.localdb.ContainerRequestLog(s.userctx, arvados.ContainerLogOptions{
+ UUID: s.reqUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "GET",
+ Header: r.Header,
+ Path: "/" + s.ctrUUID + "/stderr.txt",
+ },
+ })
+ if broken {
+ c.Check(err, check.ErrorMatches, `.*tunnel endpoint is invalid.*`)
+ continue
+ }
+ c.Check(err, check.IsNil)
+ c.Assert(handler, check.NotNil)
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, r)
+ resp := rec.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ buf, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ c.Check(string(buf), check.Equals, "hello world\n")
+ }
+}
+
+func (s *ContainerGatewaySuite) TestContainerRequestLogViaGateway(c *check.C) {
+ s.setupLogCollection(c)
+ s.testContainerRequestLog(c)
+}
+
+func (s *ContainerGatewaySuite) TestContainerRequestLogViaKeepWeb(c *check.C) {
+ s.setupLogCollection(c)
+ s.saveLogAndCloseGateway(c)
+ s.testContainerRequestLog(c)
+}
+
+func (s *ContainerGatewaySuite) testContainerRequestLog(c *check.C) {
+ for _, trial := range []struct {
+ method string
+ path string
+ header http.Header
+ unauthenticated bool
+ expectStatus int
+ expectBodyRe string
+ expectHeader http.Header
+ }{
+ {
+ method: "GET",
+ path: s.ctrUUID + "/stderr.txt",
+ expectStatus: http.StatusOK,
+ expectBodyRe: "hello world\n",
+ expectHeader: http.Header{
+ "Content-Type": {"text/plain; charset=utf-8"},
+ },
+ },
+ {
+ method: "GET",
+ path: s.ctrUUID + "/stderr.txt",
+ header: http.Header{
+ "Range": {"bytes=-6"},
+ },
+ expectStatus: http.StatusPartialContent,
+ expectBodyRe: "world\n",
+ expectHeader: http.Header{
+ "Content-Type": {"text/plain; charset=utf-8"},
+ "Content-Range": {"bytes 6-11/12"},
+ },
+ },
+ {
+ method: "OPTIONS",
+ path: s.ctrUUID + "/stderr.txt",
+ expectStatus: http.StatusOK,
+ expectBodyRe: "",
+ expectHeader: http.Header{
+ "Dav": {"1, 2"},
+ "Allow": {"OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"},
+ },
+ },
+ {
+ method: "OPTIONS",
+ path: s.ctrUUID + "/stderr.txt",
+ unauthenticated: true,
+ header: http.Header{
+ "Access-Control-Request-Method": {"POST"},
+ },
+ expectStatus: http.StatusOK,
+ expectBodyRe: "",
+ expectHeader: http.Header{
+ "Access-Control-Allow-Headers": {"Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout, Cache-Control"},
+ "Access-Control-Allow-Methods": {"COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK"},
+ "Access-Control-Allow-Origin": {"*"},
+ "Access-Control-Max-Age": {"86400"},
+ },
+ },
+ {
+ method: "PROPFIND",
+ path: s.ctrUUID + "/",
+ expectStatus: http.StatusMultiStatus,
+ expectBodyRe: `.*\Qstderr.txt \E.*>\n?`,
+ expectHeader: http.Header{
+ "Content-Type": {"text/xml; charset=utf-8"},
+ },
+ },
+ {
+ method: "PROPFIND",
+ path: s.ctrUUID,
+ expectStatus: http.StatusMultiStatus,
+ expectBodyRe: `.*\Qstderr.txt \E.*>\n?`,
+ expectHeader: http.Header{
+ "Content-Type": {"text/xml; charset=utf-8"},
+ },
+ },
+ {
+ method: "PROPFIND",
+ path: s.ctrUUID + "/a/b/c/",
+ expectStatus: http.StatusMultiStatus,
+ expectBodyRe: `.*\Qd.html \E.*>\n?`,
+ expectHeader: http.Header{
+ "Content-Type": {"text/xml; charset=utf-8"},
+ },
+ },
+ {
+ method: "GET",
+ path: s.ctrUUID + "/a/b/c/d.html",
+ expectStatus: http.StatusOK,
+ expectBodyRe: "\n",
+ expectHeader: http.Header{
+ "Content-Type": {"text/html; charset=utf-8"},
+ },
+ },
+ } {
+ c.Logf("trial %#v", trial)
+ ctx := s.userctx
+ if trial.unauthenticated {
+ ctx = auth.NewContext(context.Background(), auth.CredentialsFromRequest(&http.Request{URL: &url.URL{}, Header: http.Header{}}))
+ }
+ r, err := http.NewRequestWithContext(ctx, trial.method, "https://controller.example/arvados/v1/container_requests/"+s.reqUUID+"/log/"+trial.path, nil)
+ c.Assert(err, check.IsNil)
+ for k := range trial.header {
+ r.Header.Set(k, trial.header.Get(k))
+ }
+ handler, err := s.localdb.ContainerRequestLog(ctx, arvados.ContainerLogOptions{
+ UUID: s.reqUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: trial.method,
+ Header: r.Header,
+ Path: "/" + trial.path,
+ },
+ })
+ c.Assert(err, check.IsNil)
+ c.Assert(handler, check.NotNil)
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, r)
+ resp := rec.Result()
+ c.Check(resp.StatusCode, check.Equals, trial.expectStatus)
+ for k := range trial.expectHeader {
+ c.Check(resp.Header[k], check.DeepEquals, trial.expectHeader[k])
+ }
+ buf, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ c.Check(string(buf), check.Matches, trial.expectBodyRe)
+ }
+}
+
+func (s *ContainerGatewaySuite) TestContainerRequestLogViaCadaver(c *check.C) {
+ s.setupLogCollection(c)
+
+ out := s.runCadaver(c, arvadostest.ActiveToken, "/arvados/v1/container_requests/"+s.reqUUID+"/log/"+s.ctrUUID, "ls")
+ c.Check(out, check.Matches, `(?ms).*stderr\.txt\s+12\s.*`)
+ c.Check(out, check.Matches, `(?ms).*a\s+0\s.*`)
+
+ out = s.runCadaver(c, arvadostest.ActiveTokenV2, "/arvados/v1/container_requests/"+s.reqUUID+"/log/"+s.ctrUUID, "get stderr.txt")
+ c.Check(out, check.Matches, `(?ms).*Downloading .* to stderr\.txt: .* succeeded\..*`)
+
+ s.saveLogAndCloseGateway(c)
+
+ out = s.runCadaver(c, arvadostest.ActiveTokenV2, "/arvados/v1/container_requests/"+s.reqUUID+"/log/"+s.ctrUUID, "get stderr.txt")
+ c.Check(out, check.Matches, `(?ms).*Downloading .* to stderr\.txt: .* succeeded\..*`)
+}
+
+func (s *ContainerGatewaySuite) runCadaver(c *check.C, password, path, stdin string) string {
+ // Replace s.srv with an HTTP server, otherwise cadaver will
+ // just fail on TLS cert verification.
+ s.srv.Close()
+ rtr := router.New(s.localdb, router.Config{})
+ s.srv = httptest.NewUnstartedServer(httpserver.AddRequestIDs(httpserver.LogRequests(rtr)))
+ s.srv.Start()
+
+ tempdir, err := ioutil.TempDir("", "localdb-test-")
+ c.Assert(err, check.IsNil)
+ defer os.RemoveAll(tempdir)
+
+ cmd := exec.Command("cadaver", s.srv.URL+path)
+ if password != "" {
+ cmd.Env = append(os.Environ(), "HOME="+tempdir)
+ f, err := os.OpenFile(filepath.Join(tempdir, ".netrc"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+ c.Assert(err, check.IsNil)
+ _, err = fmt.Fprintf(f, "default login none password %s\n", password)
+ c.Assert(err, check.IsNil)
+ c.Assert(f.Close(), check.IsNil)
+ }
+ cmd.Stdin = bytes.NewBufferString(stdin)
+ cmd.Dir = tempdir
+ stdout, err := cmd.StdoutPipe()
+ c.Assert(err, check.Equals, nil)
+ cmd.Stderr = cmd.Stdout
+ c.Logf("cmd: %v", cmd.Args)
+ go cmd.Start()
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, stdout)
+ c.Check(err, check.Equals, nil)
+ err = cmd.Wait()
+ c.Check(err, check.Equals, nil)
+ return buf.String()
+}
+
func (s *ContainerGatewaySuite) TestConnect(c *check.C) {
c.Logf("connecting to %s", s.gw.Address)
- sshconn, err := s.localdb.ContainerSSH(s.ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
+ sshconn, err := s.localdb.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Assert(err, check.IsNil)
c.Assert(sshconn.Conn, check.NotNil)
defer sshconn.Conn.Close()
@@ -244,33 +552,33 @@ func (s *ContainerGatewaySuite) TestConnect(c *check.C) {
case <-time.After(time.Second):
c.Fail()
}
- ctr, err := s.localdb.ContainerGet(s.ctx, arvados.GetOptions{UUID: s.ctrUUID})
+ ctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})
c.Check(err, check.IsNil)
c.Check(ctr.InteractiveSessionStarted, check.Equals, true)
}
func (s *ContainerGatewaySuite) TestConnectFail(c *check.C) {
c.Log("trying with no token")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{})
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, "")
_, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Check(err, check.ErrorMatches, `.* 401 .*`)
c.Log("trying with anonymous token")
- ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.AnonymousToken}})
+ ctx = ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AnonymousToken)
_, err = s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
c.Check(err, check.ErrorMatches, `.* 404 .*`)
}
func (s *ContainerGatewaySuite) TestCreateTunnel(c *check.C) {
// no AuthSecret
- conn, err := s.localdb.ContainerGatewayTunnel(s.ctx, arvados.ContainerGatewayTunnelOptions{
+ conn, err := s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{
UUID: s.ctrUUID,
})
c.Check(err, check.ErrorMatches, `authentication error`)
c.Check(conn.Conn, check.IsNil)
// bogus AuthSecret
- conn, err = s.localdb.ContainerGatewayTunnel(s.ctx, arvados.ContainerGatewayTunnelOptions{
+ conn, err = s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{
UUID: s.ctrUUID,
AuthSecret: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
})
@@ -278,7 +586,7 @@ func (s *ContainerGatewaySuite) TestCreateTunnel(c *check.C) {
c.Check(conn.Conn, check.IsNil)
// good AuthSecret
- conn, err = s.localdb.ContainerGatewayTunnel(s.ctx, arvados.ContainerGatewayTunnelOptions{
+ conn, err = s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{
UUID: s.ctrUUID,
AuthSecret: s.gw.AuthSecret,
})
@@ -297,7 +605,7 @@ func (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyOK(c *check.C)
func (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyError(c *check.C) {
forceProxyForTest = true
defer func() { forceProxyForTest = false }()
- // forceInternalURLForTest shouldn't be used because it isn't
+ // forceInternalURLForTest will not be usable because it isn't
// listed in s.cluster.Services.Controller.InternalURLs
s.testConnectThroughTunnel(c, `.*tunnel endpoint is invalid.*`)
}
@@ -306,12 +614,11 @@ func (s *ContainerGatewaySuite) TestConnectThroughTunnelNoProxyOK(c *check.C) {
s.testConnectThroughTunnel(c, "")
}
-func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectErrorMatch string) {
- rootctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{s.cluster.SystemRootToken}})
+func (s *ContainerGatewaySuite) setupGatewayWithTunnel(c *check.C) *crunchrun.Gateway {
+ rootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)
// Until the tunnel starts up, set gateway_address to a value
// that can't work. We want to ensure the only way we can
// reach the gateway is through the tunnel.
- gwaddr := "127.0.0.1:0"
tungw := &crunchrun.Gateway{
ContainerUUID: s.ctrUUID,
AuthSecret: s.gw.AuthSecret,
@@ -320,7 +627,7 @@ func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectError
ArvadosClient: s.gw.ArvadosClient,
UpdateTunnelURL: func(url string) {
c.Logf("UpdateTunnelURL(%q)", url)
- gwaddr = "tunnel " + url
+ gwaddr := "tunnel " + url
s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
UUID: s.ctrUUID,
Attrs: map[string]interface{}{
@@ -338,12 +645,12 @@ func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectError
_, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{
UUID: s.ctrUUID,
Attrs: map[string]interface{}{
- "state": arvados.ContainerStateRunning,
- "gateway_address": gwaddr}})
+ "state": arvados.ContainerStateRunning,
+ }})
c.Assert(err, check.IsNil)
for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(time.Second / 2) {
- ctr, err := s.localdb.ContainerGet(s.ctx, arvados.GetOptions{UUID: s.ctrUUID})
+ ctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})
c.Assert(err, check.IsNil)
c.Check(ctr.InteractiveSessionStarted, check.Equals, false)
c.Logf("ctr.GatewayAddress == %s", ctr.GatewayAddress)
@@ -351,10 +658,14 @@ func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectError
break
}
}
+ return tungw
+}
+func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectErrorMatch string) {
+ s.setupGatewayWithTunnel(c)
c.Log("connecting to gateway through tunnel")
arpc := rpc.NewConn("", &url.URL{Scheme: "https", Host: s.gw.ArvadosClient.APIHost}, true, rpc.PassthroughTokenProvider)
- sshconn, err := arpc.ContainerSSH(s.ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
+ sshconn, err := arpc.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})
if expectErrorMatch != "" {
c.Check(err, check.ErrorMatches, expectErrorMatch)
return
@@ -389,7 +700,7 @@ func (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectError
case <-time.After(time.Second):
c.Fail()
}
- ctr, err := s.localdb.ContainerGet(s.ctx, arvados.GetOptions{UUID: s.ctrUUID})
+ ctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})
c.Check(err, check.IsNil)
c.Check(ctr.InteractiveSessionStarted, check.Equals, true)
}
diff --git a/lib/controller/localdb/container_request.go b/lib/controller/localdb/container_request.go
index 5b2ce95da9..0234ee8fa6 100644
--- a/lib/controller/localdb/container_request.go
+++ b/lib/controller/localdb/container_request.go
@@ -6,13 +6,21 @@ package localdb
import (
"context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
)
// ContainerRequestCreate defers to railsProxy for everything except
// vocabulary checking.
func (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.ContainerRequest, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.ContainerRequest{}, err
@@ -27,6 +35,7 @@ func (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.Creat
// ContainerRequestUpdate defers to railsProxy for everything except
// vocabulary checking.
func (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.ContainerRequest{}, err
@@ -37,3 +46,102 @@ func (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.Updat
}
return resp, nil
}
+
+func (conn *Conn) ContainerRequestGet(ctx context.Context, opts arvados.GetOptions) (arvados.ContainerRequest, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.ContainerRequestGet(ctx, opts)
+}
+
+func (conn *Conn) ContainerRequestList(ctx context.Context, opts arvados.ListOptions) (arvados.ContainerRequestList, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.ContainerRequestList(ctx, opts)
+}
+
+func (conn *Conn) ContainerRequestDelete(ctx context.Context, opts arvados.DeleteOptions) (arvados.ContainerRequest, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.ContainerRequestDelete(ctx, opts)
+}
+
+func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, opts arvados.GetOptions) (arvados.ContainerStatus, error) {
+ conn.logActivity(ctx)
+ var ret arvados.ContainerStatus
+ cr, err := conn.railsProxy.ContainerRequestGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{"uuid", "container_uuid", "log_uuid"}})
+ if err != nil {
+ return ret, err
+ }
+ if cr.ContainerUUID == "" {
+ ret.SchedulingStatus = "no container assigned"
+ return ret, nil
+ }
+ // We use admin credentials to get the container record so we
+ // don't get an error when we're in a race with auto-retry and
+ // the container became user-unreadable since we fetched the
+ // CR above.
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})
+ ctr, err := conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{UUID: cr.ContainerUUID, Select: []string{"uuid", "state", "priority"}})
+ if err != nil {
+ return ret, err
+ }
+ ret.UUID = ctr.UUID
+ ret.State = ctr.State
+ if ctr.State != arvados.ContainerStateQueued && ctr.State != arvados.ContainerStateLocked {
+ // Scheduling status is not a thing once the container
+ // is in running state.
+ return ret, nil
+ }
+ var lastErr error
+ for dispatchurl := range conn.cluster.Services.DispatchCloud.InternalURLs {
+ baseurl := url.URL(dispatchurl)
+ apiurl, err := baseurl.Parse("/arvados/v1/dispatch/container?container_uuid=" + cr.ContainerUUID)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiurl.String(), nil)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ req.Header.Set("Authorization", "Bearer "+conn.cluster.ManagementToken)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ lastErr = fmt.Errorf("error getting status from dispatcher: %w", err)
+ continue
+ }
+		if resp.StatusCode == http.StatusNotFound {
+			resp.Body.Close(); continue
+		} else if resp.StatusCode != http.StatusOK {
+			resp.Body.Close(); lastErr = fmt.Errorf("error getting status from dispatcher: %s", resp.Status)
+			continue
+		}
+		var qent scheduler.QueueEnt
+		err = json.NewDecoder(resp.Body).Decode(&qent)
+		resp.Body.Close()
+		if err != nil {
+			lastErr = err; continue
+		}
+ ret.State = qent.Container.State // Prefer dispatcher's view of state if not equal to ctr.State
+ ret.SchedulingStatus = qent.SchedulingStatus
+ return ret, nil
+ }
+ if lastErr != nil {
+ // If we got a non-nil error from a dispatchcloud
+ // service, and the container state suggests
+ // dispatchcloud should know about it, then we return
+ // an error so the client knows to retry.
+ return ret, httpserver.ErrorWithStatus(lastErr, http.StatusBadGateway)
+ }
+ // All running dispatchcloud services confirm they don't have
+ // this container (the dispatcher hasn't yet noticed it
+ // appearing in the queue) or there are no dispatchcloud
+ // services configured. Either way, all we can say is that
+ // it's queued.
+ if ctr.State == arvados.ContainerStateQueued && ctr.Priority < 1 {
+ // If it hasn't been picked up by a dispatcher
+ // already, it won't be -- it's just on hold.
+ // Scheduling status does not apply.
+ return ret, nil
+ }
+ ret.SchedulingStatus = "waiting for dispatch"
+ return ret, nil
+}
diff --git a/lib/controller/localdb/container_request_test.go b/lib/controller/localdb/container_request_test.go
index cca541a401..571b77f5e3 100644
--- a/lib/controller/localdb/container_request_test.go
+++ b/lib/controller/localdb/container_request_test.go
@@ -5,72 +5,18 @@
package localdb
import (
- "context"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&ContainerRequestSuite{})
type ContainerRequestSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
-}
-
-func (s *ContainerRequestSuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
-
-func (s *ContainerRequestSuite) SetUpTest(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
- s.localdb = NewConn(s.cluster)
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
-}
-
-func (s *ContainerRequestSuite) TearDownTest(c *check.C) {
- s.railsSpy.Close()
-}
-
-func (s *ContainerRequestSuite) setUpVocabulary(c *check.C, testVocabulary string) {
- if testVocabulary == "" {
- testVocabulary = `{
- "strict_tags": false,
- "tags": {
- "IDTAGIMPORTANCES": {
- "strict": true,
- "labels": [{"label": "Importance"}, {"label": "Priority"}],
- "values": {
- "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
- "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
- "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
- }
- }
- }
- }`
- }
- voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
- c.Assert(err, check.IsNil)
- s.localdb.vocabularyCache = voc
- s.cluster.API.VocabularyPath = "foo"
+ localdbSuite
}
func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -85,7 +31,7 @@ func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+ cnt, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
"command": []string{"echo", "foo"},
@@ -116,7 +62,6 @@ func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -130,7 +75,7 @@ func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
}
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+ cnt, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"command": []string{"echo", "foo"},
"container_image": "arvados/apitestfixture:latest",
@@ -150,7 +95,7 @@ func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
},
})
c.Assert(err, check.IsNil)
- cnt, err = s.localdb.ContainerRequestUpdate(ctx, arvados.UpdateOptions{
+ cnt, err = s.localdb.ContainerRequestUpdate(s.userctx, arvados.UpdateOptions{
UUID: cnt.UUID,
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
diff --git a/lib/controller/localdb/container_test.go b/lib/controller/localdb/container_test.go
new file mode 100644
index 0000000000..86ae714ba9
--- /dev/null
+++ b/lib/controller/localdb/container_test.go
@@ -0,0 +1,319 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "math/rand"
+ "strings"
+ "sync"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&containerSuite{})
+
+type containerSuite struct {
+ localdbSuite
+ topcr arvados.ContainerRequest
+ topc arvados.Container
+ starttime time.Time
+}
+
+func (s *containerSuite) crAttrs(c *C) map[string]interface{} {
+ return map[string]interface{}{
+ "container_image": arvadostest.DockerImage112PDH,
+ "command": []string{c.TestName(), fmt.Sprintf("%d", s.starttime.UnixMilli()), "top"},
+ "output_path": "/out",
+ "priority": 1,
+ "state": "Committed",
+ "container_count_max": 1,
+ "runtime_constraints": arvados.RuntimeConstraints{
+ RAM: 1,
+ VCPUs: 1,
+ },
+ "mounts": map[string]arvados.Mount{
+ "/out": arvados.Mount{},
+ },
+ }
+}
+
+func (s *containerSuite) SetUpTest(c *C) {
+ containerPriorityUpdateInterval = 2 * time.Second
+ s.localdbSuite.SetUpTest(c)
+ s.starttime = time.Now()
+ var err error
+ s.topcr, err = s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{Attrs: s.crAttrs(c)})
+ c.Assert(err, IsNil)
+ s.topc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topcr.ContainerUUID})
+ c.Assert(err, IsNil)
+ c.Assert(int(s.topc.Priority), Not(Equals), 0)
+ c.Logf("topcr %s topc %s", s.topcr.UUID, s.topc.UUID)
+}
+
+func (s *containerSuite) TearDownTest(c *C) {
+ containerPriorityUpdateInterval = 5 * time.Minute
+ s.localdbSuite.TearDownTest(c)
+}
+
+func (s *containerSuite) syncUpdatePriority(c *C) {
+ // Sending 1x to the "update now" channel starts an update;
+ // sending again fills the channel while the first update is
+ // running; sending a third time blocks until the worker
+ // receives the 2nd send, i.e., guarantees that the first
+ // update has finished.
+ s.localdb.wantContainerPriorityUpdate <- struct{}{}
+ s.localdb.wantContainerPriorityUpdate <- struct{}{}
+ s.localdb.wantContainerPriorityUpdate <- struct{}{}
+}
+
+func (s *containerSuite) TestUpdatePriorityShouldBeNonZero(c *C) {
+ _, err := s.db.Exec("update containers set priority=0 where uuid=$1", s.topc.UUID)
+ c.Assert(err, IsNil)
+ topc, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})
+ c.Assert(err, IsNil)
+ c.Assert(int(topc.Priority), Equals, 0)
+ s.syncUpdatePriority(c)
+ topc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})
+ c.Assert(err, IsNil)
+ c.Check(int(topc.Priority), Not(Equals), 0)
+}
+
+func (s *containerSuite) TestUpdatePriorityShouldBeZero(c *C) {
+ _, err := s.db.Exec("update container_requests set priority=0 where uuid=$1", s.topcr.UUID)
+ c.Assert(err, IsNil)
+ topc, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})
+ c.Assert(err, IsNil)
+ c.Assert(int(topc.Priority), Not(Equals), 0)
+ s.syncUpdatePriority(c)
+ topc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})
+ c.Assert(err, IsNil)
+ c.Check(int(topc.Priority), Equals, 0)
+}
+
+func (s *containerSuite) TestUpdatePriorityMultiLevelWorkflow(c *C) {
+ testCtx, testCancel := context.WithDeadline(s.ctx, time.Now().Add(30*time.Second))
+ defer testCancel()
+ adminCtx := ctrlctx.NewWithToken(testCtx, s.cluster, s.cluster.SystemRootToken)
+
+ childCR := func(parent arvados.ContainerRequest, arg string) arvados.ContainerRequest {
+ attrs := s.crAttrs(c)
+ attrs["command"] = []string{c.TestName(), fmt.Sprintf("%d", s.starttime.UnixMilli()), arg}
+ cr, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{Attrs: attrs})
+ c.Assert(err, IsNil)
+ _, err = s.db.Exec("update container_requests set requesting_container_uuid=$1 where uuid=$2", parent.ContainerUUID, cr.UUID)
+ c.Assert(err, IsNil)
+ _, err = s.localdb.ContainerUpdate(adminCtx, arvados.UpdateOptions{
+ UUID: cr.ContainerUUID,
+ Attrs: map[string]interface{}{"state": "Locked"},
+ })
+ c.Assert(err, IsNil)
+ _, err = s.localdb.ContainerUpdate(adminCtx, arvados.UpdateOptions{
+ UUID: cr.ContainerUUID,
+ Attrs: map[string]interface{}{"state": "Running"},
+ })
+ c.Assert(err, IsNil)
+ return cr
+ }
+ // Build a tree of container requests and containers (3 levels
+ // deep below s.topcr)
+ allcrs := []arvados.ContainerRequest{s.topcr}
+ for i := 0; i < 2; i++ {
+ cri := childCR(s.topcr, fmt.Sprintf("i %d", i))
+ allcrs = append(allcrs, cri)
+ for j := 0; j < 3; j++ {
+ crj := childCR(cri, fmt.Sprintf("i %d j %d", i, j))
+ allcrs = append(allcrs, crj)
+ for k := 0; k < 4; k++ {
+ crk := childCR(crj, fmt.Sprintf("i %d j %d k %d", i, j, k))
+ allcrs = append(allcrs, crk)
+ }
+ }
+ }
+
+ // Set priority=0 on a parent+child, plus 18 other randomly
+ // selected containers in the tree
+ //
+ // First entries of needfix are allcrs[1] (which is "i 0") and
+ // allcrs[2] ("i 0 j 0") -- we want to make sure to get at
+ // least one parent/child pair -- and the rest were chosen
+ // randomly.
+ needfix := []int{1, 2, 23, 12, 20, 14, 13, 15, 7, 17, 6, 22, 21, 11, 1, 17, 18}
+ for n, i := range needfix {
+ needfix[n] = i
+ res, err := s.db.Exec("update containers set priority=0 where uuid=$1", allcrs[i].ContainerUUID)
+ c.Assert(err, IsNil)
+ updated, err := res.RowsAffected()
+ c.Assert(err, IsNil)
+ if n == 0 {
+ c.Assert(int(updated), Equals, 1)
+ }
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ chaosCtx, chaosCancel := context.WithCancel(adminCtx)
+ defer chaosCancel()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // Flood the api with ContainerUpdate calls for the
+ // same containers that need to have their priority
+ // fixed
+ for chaosCtx.Err() == nil {
+ n := rand.Intn(len(needfix))
+ _, err := s.localdb.ContainerUpdate(chaosCtx, arvados.UpdateOptions{
+ UUID: allcrs[needfix[n]].ContainerUUID,
+ Attrs: map[string]interface{}{
+ "runtime_status": map[string]string{
+ "info": time.Now().Format(time.RFC3339Nano),
+ },
+ },
+ })
+ if !errors.Is(err, context.Canceled) {
+ c.Check(err, IsNil)
+ }
+ }
+ }()
+ // Find and fix the containers with wrong priority
+ s.syncUpdatePriority(c)
+ // Ensure they all got fixed
+ for _, cr := range allcrs {
+ var priority int
+ err := s.db.QueryRow("select priority from containers where uuid=$1", cr.ContainerUUID).Scan(&priority)
+ c.Assert(err, IsNil)
+ c.Check(priority, Not(Equals), 0)
+ }
+ chaosCancel()
+
+ // Flood railsapi with priority updates. This can cause
+ // database deadlock: one call acquires row locks in the order
+ // {i0j0, i0, i0j1}, while another call acquires row locks in
+ // the order {i0j1, i0, i0j0}.
+ deadlockCtx, deadlockCancel := context.WithDeadline(adminCtx, time.Now().Add(30*time.Second))
+ defer deadlockCancel()
+ for _, cr := range allcrs {
+ if strings.Contains(cr.Command[2], " j ") && !strings.Contains(cr.Command[2], " k ") {
+ cr := cr
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for _, p := range []int{1, 2, 3, 4} {
+ var err error
+ for {
+ _, err = s.localdb.ContainerRequestUpdate(deadlockCtx, arvados.UpdateOptions{
+ UUID: cr.UUID,
+ Attrs: map[string]interface{}{
+ "priority": p,
+ },
+ })
+ c.Check(err, IsNil)
+ break
+ }
+ }
+ }()
+ }
+ }
+ wg.Wait()
+
+ // Simulate cascading cancellation of the entire tree. For
+ // this we need a goroutine to notice and cancel containers
+ // with state=Running and priority=0, and cancel them
+ // (this is normally done by a dispatcher).
+ dispCtx, dispCancel := context.WithCancel(adminCtx)
+ defer dispCancel()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for dispCtx.Err() == nil {
+ needcancel, err := s.localdb.ContainerList(dispCtx, arvados.ListOptions{
+ Limit: 10,
+ Filters: []arvados.Filter{{"state", "=", "Running"}, {"priority", "=", 0}},
+ })
+ if errors.Is(err, context.Canceled) {
+ break
+ }
+ c.Assert(err, IsNil)
+ for _, ctr := range needcancel.Items {
+ _, err := s.localdb.ContainerUpdate(dispCtx, arvados.UpdateOptions{
+ UUID: ctr.UUID,
+ Attrs: map[string]interface{}{
+ "state": "Cancelled",
+ },
+ })
+ if errors.Is(err, context.Canceled) {
+ break
+ }
+ c.Assert(err, IsNil)
+ }
+ time.Sleep(time.Second / 10)
+ }
+ }()
+
+ _, err := s.localdb.ContainerRequestUpdate(s.userctx, arvados.UpdateOptions{
+ UUID: s.topcr.UUID,
+ Attrs: map[string]interface{}{
+ "priority": 0,
+ },
+ })
+ c.Assert(err, IsNil)
+
+ c.Logf("waiting for all %d containers to have priority=0 after cancelling top level CR", len(allcrs))
+ for {
+ time.Sleep(time.Second / 2)
+ if testCtx.Err() != nil {
+ for i, cr := range allcrs {
+ var ctr arvados.Container
+ var command string
+ err = s.db.QueryRowContext(s.ctx, `select cr.priority, cr.state, cr.container_uuid, c.state, c.priority, cr.command
+ from container_requests cr
+ left join containers c on cr.container_uuid = c.uuid
+ where cr.uuid=$1`, cr.UUID).Scan(&cr.Priority, &cr.State, &ctr.UUID, &ctr.State, &ctr.Priority, &command)
+ c.Check(err, IsNil)
+ c.Logf("allcrs[%d] cr.pri %d %s c.pri %d %s cr.uuid %s c.uuid %s cmd %s", i, cr.Priority, cr.State, ctr.Priority, ctr.State, cr.UUID, ctr.UUID, command)
+ }
+ c.Fatal("timed out")
+ }
+ done := true
+ for _, cr := range allcrs {
+ var priority int
+ var crstate, command, ctrUUID string
+ var parent sql.NullString
+ err := s.db.QueryRowContext(s.ctx, `select state, priority, container_uuid, requesting_container_uuid, command
+ from container_requests where uuid=$1`, cr.UUID).Scan(&crstate, &priority, &ctrUUID, &parent, &command)
+ if errors.Is(err, context.Canceled) {
+ break
+ }
+ c.Assert(err, IsNil)
+ if crstate == "Committed" && priority > 0 {
+ c.Logf("container request %s (%s; parent=%s) still has state %s priority %d", cr.UUID, command, parent.String, crstate, priority)
+ done = false
+ break
+ }
+ err = s.db.QueryRowContext(s.ctx, "select priority, command from containers where uuid=$1", cr.ContainerUUID).Scan(&priority, &command)
+ if errors.Is(err, context.Canceled) {
+ break
+ }
+ c.Assert(err, IsNil)
+ if priority > 0 {
+ c.Logf("container %s (%s) still has priority %d", cr.ContainerUUID, command, priority)
+ done = false
+ break
+ }
+ }
+ if done {
+ c.Logf("success -- all %d containers have priority=0", len(allcrs))
+ break
+ }
+ }
+}
diff --git a/lib/controller/localdb/group.go b/lib/controller/localdb/group.go
index 0d77bdbd9c..418fd6b8b7 100644
--- a/lib/controller/localdb/group.go
+++ b/lib/controller/localdb/group.go
@@ -6,6 +6,8 @@ package localdb
import (
"context"
+ "fmt"
+ "strings"
"git.arvados.org/arvados.git/sdk/go/arvados"
)
@@ -13,6 +15,7 @@ import (
// GroupCreate defers to railsProxy for everything except vocabulary
// checking.
func (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Group, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.Group{}, err
@@ -24,9 +27,15 @@ func (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (
return resp, nil
}
+func (conn *Conn) GroupGet(ctx context.Context, opts arvados.GetOptions) (arvados.Group, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.GroupGet(ctx, opts)
+}
+
// GroupUpdate defers to railsProxy for everything except vocabulary
// checking.
func (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Group, error) {
+ conn.logActivity(ctx)
err := conn.checkProperties(ctx, opts.Attrs["properties"])
if err != nil {
return arvados.Group{}, err
@@ -37,3 +46,63 @@ func (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (
}
return resp, nil
}
+
+func (conn *Conn) GroupList(ctx context.Context, opts arvados.ListOptions) (arvados.GroupList, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.GroupList(ctx, opts)
+}
+
+func (conn *Conn) GroupDelete(ctx context.Context, opts arvados.DeleteOptions) (arvados.Group, error) {
+ conn.logActivity(ctx)
+ return conn.railsProxy.GroupDelete(ctx, opts)
+}
+
+func (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {
+ conn.logActivity(ctx)
+
+ // The requested UUID can be a user (virtual home project), which we just pass on to
+ // the API server.
+ if strings.Index(options.UUID, "-j7d0g-") != 5 {
+ return conn.railsProxy.GroupContents(ctx, options)
+ }
+
+ var resp arvados.ObjectList
+
+ // Get the group object
+ respGroup, err := conn.GroupGet(ctx, arvados.GetOptions{UUID: options.UUID})
+ if err != nil {
+ return resp, err
+ }
+
+ // If the group has groupClass 'filter', apply the filters before getting the contents.
+ if respGroup.GroupClass == "filter" {
+ if filters, ok := respGroup.Properties["filters"].([]interface{}); ok {
+ for _, f := range filters {
+ // f is supposed to be a []string
+ tmp, ok2 := f.([]interface{})
+ if !ok2 || len(tmp) < 3 {
+				return resp, fmt.Errorf("filter unparsable: %T, %+v, original field: %T, %+v", tmp, tmp, f, f)
+ }
+ var filter arvados.Filter
+ if attr, ok2 := tmp[0].(string); ok2 {
+ filter.Attr = attr
+ } else {
+				return resp, fmt.Errorf("filter unparsable: attribute must be string: %T, %+v, filter: %T, %+v", tmp[0], tmp[0], f, f)
+ }
+ if operator, ok2 := tmp[1].(string); ok2 {
+ filter.Operator = operator
+ } else {
+				return resp, fmt.Errorf("filter unparsable: operator must be string: %T, %+v, filter: %T, %+v", tmp[1], tmp[1], f, f)
+ }
+ filter.Operand = tmp[2]
+ options.Filters = append(options.Filters, filter)
+ }
+ } else {
+			return resp, fmt.Errorf("filter unparsable: not an array")
+ }
+ // Use the generic /groups/contents endpoint for filter groups
+ options.UUID = ""
+ }
+
+ return conn.railsProxy.GroupContents(ctx, options)
+}
diff --git a/lib/controller/localdb/group_test.go b/lib/controller/localdb/group_test.go
index 78150c9552..7de36e1c58 100644
--- a/lib/controller/localdb/group_test.go
+++ b/lib/controller/localdb/group_test.go
@@ -5,69 +5,20 @@
package localdb
import (
- "context"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&GroupSuite{})
type GroupSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
-}
-
-func (s *GroupSuite) SetUpSuite(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
- s.localdb = NewConn(s.cluster)
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
-}
-
-func (s *GroupSuite) TearDownSuite(c *check.C) {
- s.railsSpy.Close()
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
-
-func (s *GroupSuite) setUpVocabulary(c *check.C, testVocabulary string) {
- if testVocabulary == "" {
- testVocabulary = `{
- "strict_tags": false,
- "tags": {
- "IDTAGIMPORTANCES": {
- "strict": true,
- "labels": [{"label": "Importance"}, {"label": "Priority"}],
- "values": {
- "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
- "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
- "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
- }
- }
- }
- }`
- }
- voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
- c.Assert(err, check.IsNil)
- s.localdb.vocabularyCache = voc
- s.cluster.API.VocabularyPath = "foo"
+ localdbSuite
}
func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -82,7 +33,7 @@ func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+ grp, err := s.localdb.GroupCreate(s.userctx, arvados.CreateOptions{
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
"group_class": "project",
@@ -99,7 +50,6 @@ func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -113,13 +63,13 @@ func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
}
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+ grp, err := s.localdb.GroupCreate(s.userctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"group_class": "project",
},
})
c.Assert(err, check.IsNil)
- grp, err = s.localdb.GroupUpdate(ctx, arvados.UpdateOptions{
+ grp, err = s.localdb.GroupUpdate(s.userctx, arvados.UpdateOptions{
UUID: grp.UUID,
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
@@ -135,9 +85,9 @@ func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
}
func (s *GroupSuite) TestCanWriteCanManageResponses(c *check.C) {
- ctxUser1 := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
- ctxUser2 := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.SpectatorToken}})
- ctxAdmin := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.AdminToken}})
+ ctxUser1 := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.ActiveTokenV2)
+ ctxUser2 := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.SpectatorToken)
+ ctxAdmin := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)
project, err := s.localdb.GroupCreate(ctxUser1, arvados.CreateOptions{
Attrs: map[string]interface{}{
"group_class": "project",
diff --git a/lib/controller/localdb/link_test.go b/lib/controller/localdb/link_test.go
index 2f07fb459e..5d0fe3d6b2 100644
--- a/lib/controller/localdb/link_test.go
+++ b/lib/controller/localdb/link_test.go
@@ -5,72 +5,19 @@
package localdb
import (
- "context"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&LinkSuite{})
type LinkSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
-}
-
-func (s *LinkSuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
-
-func (s *LinkSuite) SetUpTest(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
- s.localdb = NewConn(s.cluster)
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
-}
-
-func (s *LinkSuite) TearDownTest(c *check.C) {
- s.railsSpy.Close()
-}
-
-func (s *LinkSuite) setUpVocabulary(c *check.C, testVocabulary string) {
- if testVocabulary == "" {
- testVocabulary = `{
- "strict_tags": false,
- "tags": {
- "IDTAGIMPORTANCES": {
- "strict": true,
- "labels": [{"label": "Importance"}, {"label": "Priority"}],
- "values": {
- "IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
- "IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
- "IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
- }
- }
- }
- }`
- }
- voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
- c.Assert(err, check.IsNil)
- s.localdb.vocabularyCache = voc
- s.cluster.API.VocabularyPath = "foo"
+ localdbSuite
}
func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -85,7 +32,7 @@ func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+ lnk, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
"link_class": "star",
@@ -104,7 +51,6 @@ func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
s.setUpVocabulary(c, "")
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
tests := []struct {
name string
@@ -118,7 +64,7 @@ func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
}
for _, tt := range tests {
c.Log(c.TestName()+" ", tt.name)
- lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+ lnk, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{
Attrs: map[string]interface{}{
"link_class": "star",
"tail_uuid": "zzzzz-j7d0g-publicfavorites",
@@ -126,7 +72,7 @@ func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
},
})
c.Assert(err, check.IsNil)
- lnk, err = s.localdb.LinkUpdate(ctx, arvados.UpdateOptions{
+ lnk, err = s.localdb.LinkUpdate(s.userctx, arvados.UpdateOptions{
UUID: lnk.UUID,
Select: []string{"uuid", "properties"},
Attrs: map[string]interface{}{
diff --git a/lib/controller/localdb/localdb_test.go b/lib/controller/localdb/localdb_test.go
new file mode 100644
index 0000000000..053031a8cf
--- /dev/null
+++ b/lib/controller/localdb/localdb_test.go
@@ -0,0 +1,105 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "errors"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/jmoiron/sqlx"
+ check "gopkg.in/check.v1"
+)
+
+// localdbSuite is a gocheck fixture embedded by the localdb test
+// suites. It provides a loaded cluster config, a database handle, a
+// per-test transaction (rolled back in TearDownTest), a localdb.Conn
+// whose rails proxy is routed through a recording spy, and a request
+// context authenticated as the "active" test user.
+type localdbSuite struct {
+	ctx         context.Context    // base context, carries test logger and the per-test tx
+	cancel      context.CancelFunc // cancels ctx in TearDownTest
+	cluster     *arvados.Cluster
+	db          *sqlx.DB
+	dbConnector *ctrlctx.DBConnector
+	tx          *sqlx.Tx     // per-test transaction, never committed
+	txFinish    func(*error) // finalizer from ctrlctx.New; called with errRollbackAfterTest
+	userctx     context.Context // uses ActiveUser token
+	localdb     *Conn
+	railsSpy    *arvadostest.Proxy // records calls forwarded to RailsAPI
+}
+
+// SetUpSuite starts two Keep services for tests that need blob
+// storage (e.g., collection tests).
+func (s *localdbSuite) SetUpSuite(c *check.C) {
+	arvadostest.StartKeep(2, true)
+}
+
+// TearDownSuite resets the Rails test database after the whole suite
+// runs.
+func (s *localdbSuite) TearDownSuite(c *check.C) {
+	// Undo any changes/additions to the user database so they
+	// don't affect subsequent tests.
+	arvadostest.ResetEnv()
+	c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+// SetUpTest loads the test cluster config, opens the database, wraps
+// the test in a transaction (rolled back in TearDownTest), and points
+// localdb's rails proxy at a recording spy in front of RailsAPI.
+func (s *localdbSuite) SetUpTest(c *check.C) {
+	// Zero the whole struct so no state leaks between tests.
+	*s = localdbSuite{}
+	logger := ctxlog.TestLogger(c)
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+	s.ctx = ctxlog.Context(s.ctx, logger)
+	cfg, err := config.NewLoader(nil, logger).Load()
+	c.Assert(err, check.IsNil)
+	s.cluster, err = cfg.GetCluster("")
+	c.Assert(err, check.IsNil)
+	s.dbConnector = &ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}
+	s.db, err = s.dbConnector.GetDB(s.ctx)
+	c.Assert(err, check.IsNil)
+	// Attach a transaction to s.ctx; s.tx is the same transaction,
+	// exposed so tests can query mid-transaction state directly.
+	s.ctx, s.txFinish = ctrlctx.New(s.ctx, s.dbConnector.GetDB)
+	s.tx, err = ctrlctx.CurrentTx(s.ctx)
+	c.Assert(err, check.IsNil)
+	s.localdb = NewConn(s.ctx, s.cluster, s.dbConnector.GetDB)
+	s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+	// Route localdb's rails calls through the spy so tests can
+	// inspect what was forwarded to RailsAPI.
+	*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+	s.userctx = ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.ActiveTokenV2)
+}
+
+// errRollbackAfterTest is passed to txFinish to force rollback of the
+// per-test transaction.
+var errRollbackAfterTest = errors.New("rollback after test")
+
+// TearDownTest rolls back the test transaction and releases all
+// resources acquired in SetUpTest. The nil checks keep it safe even
+// if SetUpTest failed partway through.
+func (s *localdbSuite) TearDownTest(c *check.C) {
+	if s.tx != nil {
+		s.tx.Rollback()
+	}
+	if s.txFinish != nil {
+		s.txFinish(&errRollbackAfterTest)
+	}
+	if s.railsSpy != nil {
+		s.railsSpy.Close()
+	}
+	if s.dbConnector != nil {
+		s.dbConnector.Close()
+	}
+	s.cancel()
+}
+
+// setUpVocabulary installs the given vocabulary JSON (or a default
+// test vocabulary, if testVocabulary is empty) directly into
+// localdb's vocabulary cache, and sets a placeholder VocabularyPath
+// so vocabulary checking behaves as enabled.
+func (s *localdbSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	// Bypass the normal load-from-file path by priming the cache.
+	s.localdb.vocabularyCache = voc
+	s.cluster.API.VocabularyPath = "foo"
+}
diff --git a/lib/controller/localdb/log_activity.go b/lib/controller/localdb/log_activity.go
new file mode 100644
index 0000000000..9c9660aec4
--- /dev/null
+++ b/lib/controller/localdb/log_activity.go
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/ctrlctx"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+)
+
+// logActivity records an "activity" event in the logs table for the
+// user making the current request, at most once per user per
+// configured Users.ActivityLoggingPeriod. A zero/negative period
+// disables logging entirely.
+func (conn *Conn) logActivity(ctx context.Context) {
+	p := conn.cluster.Users.ActivityLoggingPeriod.Duration()
+	if p < 1 {
+		ctxlog.FromContext(ctx).Debug("logActivity disabled by config")
+		return
+	}
+	user, _, err := ctrlctx.CurrentAuth(ctx)
+	if err == ctrlctx.ErrUnauthenticated {
+		ctxlog.FromContext(ctx).Debug("logActivity skipped for unauthenticated request")
+		return
+	} else if err != nil {
+		ctxlog.FromContext(ctx).WithError(err).Error("logActivity CurrentAuth failed")
+		return
+	}
+	now := time.Now()
+	conn.activeUsersLock.Lock()
+	// Start a fresh "already logged" set when the current logging
+	// period has ended (or on first use).
+	if conn.activeUsers == nil || conn.activeUsersReset.IsZero() || conn.activeUsersReset.Before(now) {
+		conn.activeUsersReset = alignedPeriod(now, p)
+		conn.activeUsers = map[string]bool{}
+	}
+	logged := conn.activeUsers[user.UUID]
+	if !logged {
+		// Prevent other concurrent calls from logging about
+		// this user until we finish.
+		conn.activeUsers[user.UUID] = true
+	}
+	conn.activeUsersLock.Unlock()
+	if logged {
+		return
+	}
+	defer func() {
+		// If we return without logging, reset the flag so we
+		// try again on the user's next API call.
+		if !logged {
+			conn.activeUsersLock.Lock()
+			conn.activeUsers[user.UUID] = false
+			conn.activeUsersLock.Unlock()
+		}
+	}()
+
+	// Use a separate transaction for the log entry — presumably so
+	// it persists independently of the request's own transaction.
+	// NOTE(review): confirm ctrlctx.NewTx semantics.
+	tx, err := ctrlctx.NewTx(ctx)
+	if err != nil {
+		ctxlog.FromContext(ctx).WithError(err).Error("logActivity NewTx failed")
+		return
+	}
+	defer tx.Rollback() // no-op after a successful Commit below
+	_, err = tx.ExecContext(ctx, `
+insert into logs
+ (uuid,
+  owner_uuid, modified_by_user_uuid, object_owner_uuid,
+  event_type,
+  summary,
+  object_uuid,
+  properties,
+  event_at, created_at, updated_at, modified_at)
+ values
+ ($1, $2, $2, $2, $3, $4, $5, $6,
+  current_timestamp at time zone 'UTC',
+  current_timestamp at time zone 'UTC',
+  current_timestamp at time zone 'UTC',
+  current_timestamp at time zone 'UTC')
+ returning id`,
+		arvados.RandomUUID(conn.cluster.ClusterID, "57u5n"),
+		conn.cluster.ClusterID+"-tpzed-000000000000000", // both modified_by and object_owner
+		"activity",
+		"activity of "+user.UUID,
+		user.UUID,
+		"{}")
+	if err != nil {
+		ctxlog.FromContext(ctx).WithError(err).Error("logActivity query failed")
+		return
+	}
+	err = tx.Commit()
+	if err != nil {
+		ctxlog.FromContext(ctx).WithError(err).Error("logActivity commit failed")
+		return
+	}
+	// Success: leave the in-memory flag set so the deferred cleanup
+	// above does not clear it.
+	logged = true
+}
+
+// alignedPeriod computes a time interval that includes now and aligns
+// to local clock times that are multiples of p. For example, if local
+// time is UTC-5 and ActivityLoggingPeriod=4h, periodStart and
+// periodEnd will be 0000-0400, 0400-0800, etc., in local time. If p
+// is a multiple of 24h, periods will start and end at midnight.
+//
+// If DST starts or ends during this period, the boundaries will be
+// aligned based on either DST or non-DST time depending on whether
+// now is before or after the DST transition. The consequences are
+// presumed to be inconsequential, e.g., logActivity may unnecessarily
+// log activity more than once in a period that includes a DST
+// transition.
+//
+// In all cases, the period ends in the future.
+//
+// Only the end of the period is returned.
+func alignedPeriod(now time.Time, p time.Duration) time.Time {
+	// Truncate in local-time coordinates: shift now by the current
+	// UTC offset, truncate to a multiple of p, then shift back.
+	_, tzsec := now.Zone()
+	tzoff := time.Duration(tzsec) * time.Second
+	periodStart := now.Add(tzoff).Truncate(p).Add(-tzoff)
+	return periodStart.Add(p)
+}
diff --git a/lib/controller/localdb/log_activity_test.go b/lib/controller/localdb/log_activity_test.go
new file mode 100644
index 0000000000..92624e4508
--- /dev/null
+++ b/lib/controller/localdb/log_activity_test.go
@@ -0,0 +1,74 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "database/sql"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&activityPeriodSuite{})
+
+type activityPeriodSuite struct{}
+
+// The important thing is that, even when daylight saving time is
+// making things difficult, the current period ends in the future.
+func (*activityPeriodSuite) TestPeriod(c *check.C) {
+	toronto, err := time.LoadLocation("America/Toronto")
+	c.Assert(err, check.IsNil)
+
+	// Reference days on which DST starts and ends in 2022.
+	format := "2006-01-02 15:04:05 MST"
+	dststartday, err := time.ParseInLocation(format, "2022-03-13 00:00:00 EST", toronto)
+	c.Assert(err, check.IsNil)
+	dstendday, err := time.ParseInLocation(format, "2022-11-06 00:00:00 EDT", toronto)
+	c.Assert(err, check.IsNil)
+
+	for _, period := range []time.Duration{
+		time.Minute * 13,
+		time.Minute * 49,
+		time.Hour,
+		4 * time.Hour,
+		48 * time.Hour,
+	} {
+		// Sample every 3 minutes across the 48 hours surrounding
+		// each DST transition.
+		for offset := time.Duration(0); offset < 48*time.Hour; offset += 3 * time.Minute {
+			t := dststartday.Add(offset)
+			end := alignedPeriod(t, period)
+			c.Check(end.After(t), check.Equals, true, check.Commentf("period %v offset %v", period, offset))
+
+			t = dstendday.Add(offset)
+			end = alignedPeriod(t, period)
+			c.Check(end.After(t), check.Equals, true, check.Commentf("period %v offset %v", period, offset))
+		}
+	}
+}
+
+// TestLogActivity verifies that a user's first API call in an
+// activity-logging period creates a log row, and a second call in
+// the same period does not.
+func (s *CollectionSuite) TestLogActivity(c *check.C) {
+	starttime := time.Now()
+	// Force a fresh logging period so earlier tests' activity
+	// doesn't suppress this one.
+	s.localdb.activeUsersLock.Lock()
+	s.localdb.activeUsersReset = starttime
+	s.localdb.activeUsersLock.Unlock()
+	for i := 0; i < 2; i++ {
+		logthreshold := time.Now()
+		_, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"name": "test collection",
+			},
+			EnsureUniqueName: true,
+		})
+		c.Assert(err, check.IsNil)
+		var uuid string
+		err = s.db.QueryRowContext(s.ctx, `select uuid from logs where object_uuid = $1 and event_at > $2`, arvadostest.ActiveUserUUID, logthreshold.UTC()).Scan(&uuid)
+		if i == 0 {
+			// First call in the period: expect a new log row.
+			c.Check(err, check.IsNil)
+			c.Check(uuid, check.HasLen, 27)
+		} else {
+			// Second call: already logged; no new row expected.
+			c.Check(err, check.Equals, sql.ErrNoRows)
+		}
+	}
+}
diff --git a/lib/controller/localdb/login.go b/lib/controller/localdb/login.go
index 2b20491a04..f9b968a705 100644
--- a/lib/controller/localdb/login.go
+++ b/lib/controller/localdb/login.go
@@ -10,6 +10,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "net"
"net/http"
"net/url"
"strings"
@@ -162,3 +163,68 @@ func (conn *Conn) CreateAPIClientAuthorization(ctx context.Context, rootToken st
}
return
}
+
+var errUserinfoInRedirectTarget = errors.New("redirect target rejected because it contains userinfo")
+
+// validateLoginRedirectTarget returns nil if returnTo is an
+// acceptable post-login redirect target per cluster config
+// (TrustedClients, including "*." wildcard entries; the configured
+// Workbench URLs; or, with TrustPrivateNetworks, localhost and
+// private-network IPs). Only the origin of returnTo is considered.
+func validateLoginRedirectTarget(cluster *arvados.Cluster, returnTo string) error {
+	u, err := url.Parse(returnTo)
+	if err != nil {
+		return err
+	}
+	// Resolve "/" against the URL to discard path/query/fragment,
+	// keeping only scheme, host, and userinfo.
+	u, err = u.Parse("/")
+	if err != nil {
+		return err
+	}
+	if u.User != nil {
+		return errUserinfoInRedirectTarget
+	}
+	target := origin(*u)
+	for trusted := range cluster.Login.TrustedClients {
+		trustedOrigin := origin(url.URL(trusted))
+		if trustedOrigin == target {
+			return nil
+		}
+		// If TrustedClients has https://*.bar.example, we
+		// trust https://foo.bar.example. Note origin() has
+		// already stripped the incoming Path, so we won't
+		// accidentally trust
+		// https://attacker.example/pwn.bar.example here. See
+		// tests.
+		if strings.HasPrefix(trustedOrigin, u.Scheme+"://*.") && strings.HasSuffix(target, trustedOrigin[len(u.Scheme)+4:]) {
+			return nil
+		}
+	}
+	// The cluster's own Workbench sites are always trusted.
+	if target == origin(url.URL(cluster.Services.Workbench1.ExternalURL)) ||
+		target == origin(url.URL(cluster.Services.Workbench2.ExternalURL)) {
+		return nil
+	}
+	if cluster.Login.TrustPrivateNetworks {
+		if u.Hostname() == "localhost" {
+			return nil
+		}
+		if ip := net.ParseIP(u.Hostname()); len(ip) > 0 {
+			for _, n := range privateNetworks {
+				if n.Contains(ip) {
+					return nil
+				}
+			}
+		}
+	}
+	return fmt.Errorf("requesting site is not listed in TrustedClients config")
+}
+
+// origin returns the canonical origin of a URL, e.g.,
+// origin("https://example:443/foo") returns "https://example/".
+// Path, query, fragment, and userinfo are dropped, and the default
+// port (80 for http, 443 for https) is stripped from the host.
+func origin(u url.URL) string {
+	origin := url.URL{
+		Scheme: u.Scheme,
+		Host:   u.Host,
+		Path:   "/",
+	}
+	if origin.Port() == "80" && origin.Scheme == "http" {
+		origin.Host = origin.Hostname()
+	} else if origin.Port() == "443" && origin.Scheme == "https" {
+		origin.Host = origin.Hostname()
+	}
+	return origin.String()
+}
diff --git a/lib/controller/localdb/login_ldap.go b/lib/controller/localdb/login_ldap.go
index 3f13c7b27a..df3982c85f 100644
--- a/lib/controller/localdb/login_ldap.go
+++ b/lib/controller/localdb/login_ldap.go
@@ -47,7 +47,25 @@ func (ctrl *ldapLoginController) UserAuthenticate(ctx context.Context, opts arva
}
log = log.WithField("URL", conf.URL.String())
- l, err := ldap.DialURL(conf.URL.String())
+	var l *ldap.Conn
+	var err error
+	if conf.URL.Scheme == "ldaps" {
+		// ldap.DialURL does not currently allow us to control
+		// tls.Config, so we need to figure out the port
+		// ourselves and call DialTLS.
+		//
+		// Use a distinct name (splitErr) for the SplitHostPort
+		// result: "host, port, err :=" would shadow the outer
+		// err, so "l, err = ldap.DialTLS(...)" would assign the
+		// shadowed variable and the "if err != nil" check after
+		// this block would silently miss DialTLS failures,
+		// leaving l nil.
+		host, port, splitErr := net.SplitHostPort(conf.URL.Host)
+		if splitErr != nil {
+			// Assume error means no port given
+			host = conf.URL.Host
+			port = ldap.DefaultLdapsPort
+		}
+		l, err = ldap.DialTLS("tcp", net.JoinHostPort(host, port), &tls.Config{
+			ServerName: host,
+			MinVersion: uint16(conf.MinTLSVersion),
+		})
+	} else {
+		l, err = ldap.DialURL(conf.URL.String())
+	}
if err != nil {
log.WithError(err).Error("ldap connection failed")
return arvados.APIClientAuthorization{}, err
@@ -56,6 +74,7 @@ func (ctrl *ldapLoginController) UserAuthenticate(ctx context.Context, opts arva
if conf.StartTLS {
var tlsconfig tls.Config
+ tlsconfig.MinVersion = uint16(conf.MinTLSVersion)
if conf.InsecureTLS {
tlsconfig.InsecureSkipVerify = true
} else {
diff --git a/lib/controller/localdb/login_ldap_docker_test.sh b/lib/controller/localdb/login_ldap_docker_test.sh
index 43f2ec0d77..c539e0e60b 100755
--- a/lib/controller/localdb/login_ldap_docker_test.sh
+++ b/lib/controller/localdb/login_ldap_docker_test.sh
@@ -160,7 +160,7 @@ objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: top
objectClass: shadowAccount
-shadowMax: 180
+shadowMax: -1
shadowMin: 1
shadowWarning: 7
shadowLastChange: 10701
@@ -169,6 +169,26 @@ uidNumber: 11111
gidNumber: 11111
homeDirectory: /home/foo-bar
userPassword: ${passwordhash}
+
+dn: uid=expired,dc=example,dc=org
+uid: expired
+cn: "Exp Ired"
+givenName: Exp
+sn: Ired
+mail: expired@example.com
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: 180
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11112
+gidNumber: 11111
+homeDirectory: /home/expired
+userPassword: ${passwordhash}
EOF
echo >&2 "Adding example user entry user=foo-bar pass=secret (retrying until server comes up)"
@@ -188,7 +208,7 @@ docker run --detach --rm --name=${ctrlctr} \
-v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
-v "${tmpdir}/zzzzz.yml":/etc/arvados/config.yml:ro \
-v $(realpath "${PWD}/../../.."):/arvados:ro \
- debian:10 \
+ debian:11 \
bash -c "${setup_pam_ldap:-true} && arvados-server controller"
docker logs --follow ${ctrlctr} 2>$debug >$debug &
ctrlhostports=$(docker port ${ctrlctr} 9999/tcp)
@@ -227,6 +247,13 @@ else
check_contains "${resp}" '{"errors":["PAM: Authentication failure (with username \"foo-bar\" and password)"]}'
fi
+if [[ "${config_method}" = pam ]]; then
+ echo >&2 "Testing expired credentials"
+ resp="$(set -x; curl -s --include -d username=expired -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
+ check_contains "${resp}" "HTTP/1.1 401"
+ check_contains "${resp}" '{"errors":["PAM: Authentication failure; \"You are required to change your LDAP password immediately.\""]}'
+fi
+
echo >&2 "Testing authentication success"
resp="$(set -x; curl -s --include -d username=foo-bar -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
check_contains "${resp}" "HTTP/1.1 200"
diff --git a/lib/controller/localdb/login_ldap_test.go b/lib/controller/localdb/login_ldap_test.go
index b8ba6b4676..c7d8390225 100644
--- a/lib/controller/localdb/login_ldap_test.go
+++ b/lib/controller/localdb/login_ldap_test.go
@@ -5,48 +5,27 @@
package localdb
import (
- "context"
"encoding/json"
"net"
"net/http"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/bradleypeabody/godap"
- "github.com/jmoiron/sqlx"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&LDAPSuite{})
type LDAPSuite struct {
- cluster *arvados.Cluster
- ctrl *ldapLoginController
- ldap *godap.LDAPServer // fake ldap server that accepts auth goodusername/goodpassword
- db *sqlx.DB
-
- // transaction context
- ctx context.Context
- rollback func() error
-}
-
-func (s *LDAPSuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+ localdbSuite
+ ldap *godap.LDAPServer // fake ldap server that accepts auth goodusername/goodpassword
}
-func (s *LDAPSuite) SetUpSuite(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
+func (s *LDAPSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
ln, err := net.Listen("tcp", "127.0.0.1:0")
c.Assert(err, check.IsNil)
@@ -84,35 +63,19 @@ func (s *LDAPSuite) SetUpSuite(c *check.C) {
s.cluster.Login.LDAP.Enable = true
err = json.Unmarshal([]byte(`"ldap://`+ln.Addr().String()+`"`), &s.cluster.Login.LDAP.URL)
+ c.Assert(err, check.IsNil)
s.cluster.Login.LDAP.StartTLS = false
s.cluster.Login.LDAP.SearchBindUser = "cn=goodusername,dc=example,dc=com"
s.cluster.Login.LDAP.SearchBindPassword = "goodpassword"
s.cluster.Login.LDAP.SearchBase = "dc=example,dc=com"
- c.Assert(err, check.IsNil)
- s.ctrl = &ldapLoginController{
+ s.localdb.loginController = &ldapLoginController{
Cluster: s.cluster,
- Parent: &Conn{railsProxy: railsproxy.NewConn(s.cluster)},
- }
- s.db = arvadostest.DB(c, s.cluster)
-}
-
-func (s *LDAPSuite) SetUpTest(c *check.C) {
- tx, err := s.db.Beginx()
- c.Assert(err, check.IsNil)
- s.ctx = ctrlctx.NewWithTransaction(context.Background(), tx)
- s.rollback = tx.Rollback
-}
-
-func (s *LDAPSuite) TearDownTest(c *check.C) {
- if s.rollback != nil {
- s.rollback()
+ Parent: s.localdb,
}
}
func (s *LDAPSuite) TestLoginSuccess(c *check.C) {
- conn := NewConn(s.cluster)
- conn.loginController = s.ctrl
- resp, err := conn.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
+ resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: "goodusername",
Password: "goodpassword",
})
@@ -121,7 +84,7 @@ func (s *LDAPSuite) TestLoginSuccess(c *check.C) {
c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
- ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{"v2/" + resp.UUID + "/" + resp.APIToken}})
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, "v2/"+resp.UUID+"/"+resp.APIToken)
user, err := railsproxy.NewConn(s.cluster).UserGetCurrent(ctx, arvados.GetOptions{})
c.Check(err, check.IsNil)
c.Check(user.Email, check.Equals, "goodusername@example.com")
@@ -131,7 +94,7 @@ func (s *LDAPSuite) TestLoginSuccess(c *check.C) {
func (s *LDAPSuite) TestLoginFailure(c *check.C) {
// search returns no results
s.cluster.Login.LDAP.SearchBase = "dc=example,dc=invalid"
- resp, err := s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
+ resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: "goodusername",
Password: "goodpassword",
})
@@ -144,7 +107,7 @@ func (s *LDAPSuite) TestLoginFailure(c *check.C) {
// search returns result, but auth fails
s.cluster.Login.LDAP.SearchBase = "dc=example,dc=com"
- resp, err = s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
+ resp, err = s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: "badusername",
Password: "badpassword",
})
diff --git a/lib/controller/localdb/login_oidc.go b/lib/controller/localdb/login_oidc.go
index 6d6f80f39c..d91cdddc01 100644
--- a/lib/controller/localdb/login_oidc.go
+++ b/lib/controller/localdb/login_oidc.go
@@ -14,8 +14,10 @@ import (
"errors"
"fmt"
"io"
+ "net"
"net/http"
"net/url"
+ "regexp"
"strings"
"sync"
"text/template"
@@ -28,7 +30,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
- "github.com/coreos/go-oidc"
+ "github.com/coreos/go-oidc/v3/oidc"
lru "github.com/hashicorp/golang-lru"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
@@ -66,10 +68,11 @@ type oidcLoginController struct {
// https://people.googleapis.com/)
peopleAPIBasePath string
- provider *oidc.Provider // initialized by setup()
- oauth2conf *oauth2.Config // initialized by setup()
- verifier *oidc.IDTokenVerifier // initialized by setup()
- mu sync.Mutex // protects setup()
+ provider *oidc.Provider // initialized by setup()
+ endSessionURL *url.URL // initialized by setup()
+ oauth2conf *oauth2.Config // initialized by setup()
+ verifier *oidc.IDTokenVerifier // initialized by setup()
+ mu sync.Mutex // protects setup()
}
// Initialize ctrl.provider and ctrl.oauth2conf.
@@ -99,11 +102,46 @@ func (ctrl *oidcLoginController) setup() error {
ClientID: ctrl.ClientID,
})
ctrl.provider = provider
+ var claims struct {
+ EndSessionEndpoint string `json:"end_session_endpoint"`
+ }
+ err = provider.Claims(&claims)
+ if err != nil {
+ return fmt.Errorf("error parsing OIDC discovery metadata: %v", err)
+ } else if claims.EndSessionEndpoint == "" {
+ ctrl.endSessionURL = nil
+ } else {
+ u, err := url.Parse(claims.EndSessionEndpoint)
+ if err != nil {
+ return fmt.Errorf("OIDC end_session_endpoint is not a valid URL: %v", err)
+ } else if u.Scheme != "https" {
+ return fmt.Errorf("OIDC end_session_endpoint MUST use HTTPS but does not: %v", u.String())
+ } else {
+ ctrl.endSessionURL = u
+ }
+ }
return nil
}
func (ctrl *oidcLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
- return logout(ctx, ctrl.Cluster, opts)
+ err := ctrl.setup()
+ if err != nil {
+ return arvados.LogoutResponse{}, fmt.Errorf("error setting up OpenID Connect provider: %s", err)
+ }
+ resp, err := logout(ctx, ctrl.Cluster, opts)
+ if err != nil {
+ return arvados.LogoutResponse{}, err
+ }
+ creds, credsOK := auth.FromContext(ctx)
+ if ctrl.endSessionURL != nil && credsOK && len(creds.Tokens) > 0 {
+ values := ctrl.endSessionURL.Query()
+ values.Set("client_id", ctrl.ClientID)
+ values.Set("post_logout_redirect_uri", resp.RedirectLocation)
+ u := *ctrl.endSessionURL
+ u.RawQuery = values.Encode()
+ resp.RedirectLocation = u.String()
+ }
+ return resp, err
}
func (ctrl *oidcLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
@@ -116,6 +154,9 @@ func (ctrl *oidcLoginController) Login(ctx context.Context, opts arvados.LoginOp
if opts.ReturnTo == "" {
return loginError(errors.New("missing return_to parameter"))
}
+ if err := validateLoginRedirectTarget(ctrl.Parent.cluster, opts.ReturnTo); err != nil {
+ return loginError(fmt.Errorf("invalid return_to parameter: %s", err))
+ }
state := ctrl.newOAuth2State([]byte(ctrl.Cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
var authparams []oauth2.AuthCodeOption
for k, v := range ctrl.AuthParams {
@@ -149,10 +190,39 @@ func (ctrl *oidcLoginController) Login(ctx context.Context, opts arvados.LoginOp
return loginError(err)
}
ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
- return ctrl.Parent.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
- ReturnTo: state.Remote + "," + state.ReturnTo,
+ resp, err := ctrl.Parent.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ ReturnTo: state.Remote + ",https://controller.api.client.invalid",
AuthInfo: *authinfo,
})
+ if err != nil {
+ return resp, err
+ }
+ // Extract token from rails' UserSessionCreate response, and
+ // attach it to our caller's desired ReturnTo URL. The Rails
+ // handler explicitly disallows sending the real ReturnTo as a
+ // belt-and-suspenders defence against Rails accidentally
+ // exposing an additional login relay.
+ u, err := url.Parse(resp.RedirectLocation)
+ if err != nil {
+ return resp, err
+ }
+ token := u.Query().Get("api_token")
+ if token == "" {
+ resp.RedirectLocation = state.ReturnTo
+ } else {
+ u, err := url.Parse(state.ReturnTo)
+ if err != nil {
+ return resp, err
+ }
+ q := u.Query()
+ if q == nil {
+ q = url.Values{}
+ }
+ q.Set("api_token", token)
+ u.RawQuery = q.Encode()
+ resp.RedirectLocation = u.String()
+ }
+ return resp, nil
}
func (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
@@ -335,7 +405,7 @@ func OIDCAccessTokenAuthorizer(cluster *arvados.Cluster, getdb func(context.Cont
// We want ctrl to be nil if the chosen controller is not a
// *oidcLoginController, so we can ignore the 2nd return value
// of this type cast.
- ctrl, _ := NewConn(cluster).loginController.(*oidcLoginController)
+ ctrl, _ := NewConn(context.Background(), cluster, getdb).loginController.(*oidcLoginController)
cache, err := lru.New2Q(tokenCacheSize)
if err != nil {
panic(err)
@@ -390,6 +460,9 @@ func (ta *oidcTokenAuthorizer) WrapCalls(origFunc api.RoutableFunc) api.Routable
}
}
+// Matches error from oidc UserInfo() when receiving HTTP status 5xx
+var re5xxError = regexp.MustCompile(`^5\d\d `)
+
// registerToken checks whether tok is a valid OIDC Access Token and,
// if so, ensures that an api_client_authorizations row exists so that
// RailsAPI will accept it as an Arvados token.
@@ -459,6 +532,7 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
return fmt.Errorf("error setting up OpenID Connect provider: %s", err)
}
if ok, err := ta.checkAccessTokenScope(ctx, tok); err != nil || !ok {
+ // Note checkAccessTokenScope logs any interesting errors
ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
return err
}
@@ -467,6 +541,21 @@ func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) er
}
userinfo, err := ta.ctrl.provider.UserInfo(ctx, oauth2.StaticTokenSource(oauth2Token))
if err != nil {
+ if neterr := net.Error(nil); errors.As(err, &neterr) || re5xxError.MatchString(err.Error()) {
+ // If this token is in fact a valid OIDC
+ // token, but we failed to validate it here
+ // because of a network problem or internal
+ // server error, we error out now with a 5xx
+ // error, indicating to the client that they
+ // can try again. If we didn't error out now,
+ // the unrecognized token would eventually
+ // cause a 401 error further down the stack,
+ // which the caller would interpret as an
+ // unrecoverable failure.
+ ctxlog.FromContext(ctx).WithError(err).Debugf("treating OIDC UserInfo lookup error type %T as transient; failing request instead of forwarding token blindly", err)
+ return err
+ }
+ ctxlog.FromContext(ctx).WithError(err).WithField("HMAC", hmac).Debug("UserInfo failed (not an OIDC token?), caching negative result")
ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
return nil
}
@@ -560,6 +649,6 @@ func (ta *oidcTokenAuthorizer) checkAccessTokenScope(ctx context.Context, tok st
return true, nil
}
}
- ctxlog.FromContext(ctx).WithFields(logrus.Fields{"have": claims.Scope, "need": ta.ctrl.AcceptAccessTokenScope}).Infof("unacceptable access token scope")
+ ctxlog.FromContext(ctx).WithFields(logrus.Fields{"have": claims.Scope, "need": ta.ctrl.AcceptAccessTokenScope}).Info("unacceptable access token scope")
return false, httpserver.ErrorWithStatus(errors.New("unacceptable access token scope"), http.StatusUnauthorized)
}
diff --git a/lib/controller/localdb/login_oidc_test.go b/lib/controller/localdb/login_oidc_test.go
index b9f0f56e05..f505f5bc49 100644
--- a/lib/controller/localdb/login_oidc_test.go
+++ b/lib/controller/localdb/login_oidc_test.go
@@ -15,18 +15,18 @@ import (
"net/http"
"net/http/httptest"
"net/url"
+ "regexp"
"sort"
"strings"
"sync"
"testing"
"time"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/jmoiron/sqlx"
check "gopkg.in/check.v1"
)
@@ -39,20 +39,14 @@ func Test(t *testing.T) {
var _ = check.Suite(&OIDCLoginSuite{})
type OIDCLoginSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
+ localdbSuite
+ trustedURL *arvados.URL
fakeProvider *arvadostest.OIDCProvider
}
-func (s *OIDCLoginSuite) TearDownSuite(c *check.C) {
- // Undo any changes/additions to the user database so they
- // don't affect subsequent tests.
- arvadostest.ResetEnv()
- c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
-}
-
func (s *OIDCLoginSuite) SetUpTest(c *check.C) {
+ s.trustedURL = &arvados.URL{Scheme: "https", Host: "app.example.com:443", Path: "/"}
+
s.fakeProvider = arvadostest.NewOIDCProvider(c)
s.fakeProvider.AuthEmail = "active-user@arvados.local"
s.fakeProvider.AuthEmailVerified = true
@@ -62,35 +56,115 @@ func (s *OIDCLoginSuite) SetUpTest(c *check.C) {
s.fakeProvider.ValidCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
s.fakeProvider.PeopleAPIResponse = map[string]interface{}{}
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
+ s.localdbSuite.SetUpTest(c)
+
s.cluster.Login.Test.Enable = false
s.cluster.Login.Google.Enable = true
s.cluster.Login.Google.ClientID = "test%client$id"
s.cluster.Login.Google.ClientSecret = "test#client/secret"
+ s.cluster.Login.TrustedClients = map[arvados.URL]struct{}{*s.trustedURL: {}}
s.cluster.Users.PreferDomainForUsername = "PreferDomainForUsername.example.com"
s.fakeProvider.ValidClientID = "test%client$id"
s.fakeProvider.ValidClientSecret = "test#client/secret"
- s.localdb = NewConn(s.cluster)
+ s.localdb = NewConn(s.ctx, s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
c.Assert(s.localdb.loginController, check.FitsTypeOf, (*oidcLoginController)(nil))
s.localdb.loginController.(*oidcLoginController).Issuer = s.fakeProvider.Issuer.URL
s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
}
-func (s *OIDCLoginSuite) TearDownTest(c *check.C) {
- s.railsSpy.Close()
-}
-
func (s *OIDCLoginSuite) TestGoogleLogout(c *check.C) {
+ s.cluster.Login.TrustedClients[arvados.URL{Scheme: "https", Host: "foo.example", Path: "/"}] = struct{}{}
+ s.cluster.Login.TrustPrivateNetworks = false
+
resp, err := s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: "https://foo.example.com/bar"})
+ c.Check(err, check.NotNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+
+ resp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: "https://127.0.0.1/bar"})
+ c.Check(err, check.NotNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+
+ resp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: "https://foo.example/bar"})
c.Check(err, check.IsNil)
- c.Check(resp.RedirectLocation, check.Equals, "https://foo.example.com/bar")
+ c.Check(resp.RedirectLocation, check.Equals, "https://foo.example/bar")
+
+ s.cluster.Login.TrustPrivateNetworks = true
+
+ resp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: "https://192.168.1.1/bar"})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, "https://192.168.1.1/bar")
+}
+
+func (s *OIDCLoginSuite) checkRPInitiatedLogout(c *check.C, returnTo string) {
+ if !c.Check(s.fakeProvider.EndSessionEndpoint, check.NotNil,
+ check.Commentf("buggy test: EndSessionEndpoint not configured")) {
+ return
+ }
+ expURL, err := url.Parse(s.fakeProvider.Issuer.URL)
+ if !c.Check(err, check.IsNil, check.Commentf("error parsing expected URL")) {
+ return
+ }
+ expURL.Path = expURL.Path + s.fakeProvider.EndSessionEndpoint.Path
+
+ accessToken := s.fakeProvider.ValidAccessToken()
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)
+ resp, err := s.localdb.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo})
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ loc, err := url.Parse(resp.RedirectLocation)
+ if !c.Check(err, check.IsNil, check.Commentf("error parsing response URL")) {
+ return
+ }
+
+ c.Check(loc.Scheme, check.Equals, "https")
+ c.Check(loc.Host, check.Equals, expURL.Host)
+ c.Check(loc.Path, check.Equals, expURL.Path)
+
+ var expReturn string
+ switch returnTo {
+ case "":
+ expReturn = s.cluster.Services.Workbench2.ExternalURL.String()
+ default:
+ expReturn = returnTo
+ }
+ values := loc.Query()
+ c.Check(values.Get("client_id"), check.Equals, s.cluster.Login.Google.ClientID)
+ c.Check(values.Get("post_logout_redirect_uri"), check.Equals, expReturn)
+}
+
+func (s *OIDCLoginSuite) TestRPInitiatedLogoutWithoutReturnTo(c *check.C) {
+ s.fakeProvider.EndSessionEndpoint = &url.URL{Path: "/logout/fromRP"}
+ s.checkRPInitiatedLogout(c, "")
+}
+
+func (s *OIDCLoginSuite) TestRPInitiatedLogoutWithReturnTo(c *check.C) {
+ s.fakeProvider.EndSessionEndpoint = &url.URL{Path: "/rp_logout"}
+ u := arvados.URL{Scheme: "https", Host: "foo.example", Path: "/"}
+ s.cluster.Login.TrustedClients[u] = struct{}{}
+ s.checkRPInitiatedLogout(c, u.String())
+}
+
+func (s *OIDCLoginSuite) TestEndSessionEndpointBadScheme(c *check.C) {
+ // RP-Initiated Logout 1.0 says: "This URL MUST use the https scheme..."
+ u := url.URL{Scheme: "http", Host: "example.com"}
+ s.fakeProvider.EndSessionEndpoint = &u
+ _, err := s.localdb.Logout(s.ctx, arvados.LogoutOptions{})
+ c.Check(err, check.ErrorMatches,
+ `.*\bend_session_endpoint MUST use HTTPS but does not: `+regexp.QuoteMeta(u.String()))
+}
+
+func (s *OIDCLoginSuite) TestNoRPInitiatedLogoutWithoutToken(c *check.C) {
+ endPath := "/TestNoRPInitiatedLogoutWithoutToken"
+ s.fakeProvider.EndSessionEndpoint = &url.URL{Path: endPath}
+ resp, _ := s.localdb.Logout(s.ctx, arvados.LogoutOptions{})
+ u, err := url.Parse(resp.RedirectLocation)
+ c.Check(err, check.IsNil)
+ c.Check(strings.HasSuffix(u.Path, endPath), check.Equals, false,
+ check.Commentf("logout redirected to end_session_endpoint without token"))
}
func (s *OIDCLoginSuite) TestGoogleLogin_Start_Bogus(c *check.C) {
@@ -118,6 +192,13 @@ func (s *OIDCLoginSuite) TestGoogleLogin_Start(c *check.C) {
}
}
+func (s *OIDCLoginSuite) TestGoogleLogin_UnknownClient(c *check.C) {
+ resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{ReturnTo: "https://bad-app.example.com/foo?bar"})
+ c.Check(err, check.IsNil)
+ c.Check(resp.RedirectLocation, check.Equals, "")
+ c.Check(resp.HTML.String(), check.Matches, `(?ms).*requesting site is not listed in TrustedClients.*`)
+}
+
func (s *OIDCLoginSuite) TestGoogleLogin_InvalidCode(c *check.C) {
state := s.startLogin(c)
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
@@ -169,7 +250,7 @@ func (s *OIDCLoginSuite) TestConfig(c *check.C) {
s.cluster.Login.OpenIDConnect.ClientID = "oidc-client-id"
s.cluster.Login.OpenIDConnect.ClientSecret = "oidc-client-secret"
s.cluster.Login.OpenIDConnect.AuthenticationRequestParameters = map[string]string{"testkey": "testvalue"}
- localdb := NewConn(s.cluster)
+ localdb := NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
ctrl := localdb.loginController.(*oidcLoginController)
c.Check(ctrl.Issuer, check.Equals, "https://accounts.example.com/")
c.Check(ctrl.ClientID, check.Equals, "oidc-client-id")
@@ -184,7 +265,7 @@ func (s *OIDCLoginSuite) TestConfig(c *check.C) {
s.cluster.Login.Google.ClientSecret = "google-client-secret"
s.cluster.Login.Google.AlternateEmailAddresses = enableAltEmails
s.cluster.Login.Google.AuthenticationRequestParameters = map[string]string{"testkey": "testvalue"}
- localdb = NewConn(s.cluster)
+ localdb = NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
ctrl = localdb.loginController.(*oidcLoginController)
c.Check(ctrl.Issuer, check.Equals, "https://accounts.google.com")
c.Check(ctrl.ClientID, check.Equals, "google-client-id")
@@ -228,16 +309,73 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
io.WriteString(mac, accessToken)
apiToken := fmt.Sprintf("%x", mac.Sum(nil))
+ checkTokenInDB := func() time.Time {
+ var exp time.Time
+ err := db.QueryRow(`select expires_at at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
+ c.Check(err, check.IsNil)
+ c.Check(exp.Sub(time.Now()) > -time.Second, check.Equals, true)
+ c.Check(exp.Sub(time.Now()) < time.Second, check.Equals, true)
+ return exp
+ }
cleanup := func() {
+ oidcAuthorizer.cache.Purge()
_, err := db.Exec(`delete from api_client_authorizations where api_token=$1`, apiToken)
c.Check(err, check.IsNil)
}
cleanup()
defer cleanup()
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{accessToken}})
- var exp1 time.Time
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)
+
+ // Check behavior on 5xx/network errors (don't cache) vs 4xx
+ // (do cache)
+ {
+ call := oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return nil, nil
+ })
+
+ // If fakeProvider UserInfo endpoint returns 502, we
+ // should fail, return an error, and *not* cache the
+ // negative result.
+ tokenCacheNegativeTTL = time.Minute
+ s.fakeProvider.UserInfoErrorStatus = 502
+ _, err := call(ctx, nil)
+ c.Check(err, check.NotNil)
+
+ // The negative result was not cached, so retrying
+ // immediately (with UserInfo working now) should
+ // succeed.
+ s.fakeProvider.UserInfoErrorStatus = 0
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ checkTokenInDB()
+
+ cleanup()
+
+ // UserInfo 401 => cache the negative result, but
+ // don't return an error (just pass the token through
+ // as a v1 token)
+ s.fakeProvider.UserInfoErrorStatus = 401
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ ent, ok := oidcAuthorizer.cache.Get(accessToken)
+ c.Check(ok, check.Equals, true)
+ c.Check(ent, check.FitsTypeOf, time.Time{})
+
+ // UserInfo succeeds now, but we still have a cached
+ // negative result.
+ s.fakeProvider.UserInfoErrorStatus = 0
+ _, err = call(ctx, nil)
+ c.Check(err, check.IsNil)
+ ent, ok = oidcAuthorizer.cache.Get(accessToken)
+ c.Check(ok, check.Equals, true)
+ c.Check(ent, check.FitsTypeOf, time.Time{})
+
+ tokenCacheNegativeTTL = time.Millisecond
+ cleanup()
+ }
+ var exp1 time.Time
concurrent := 4
s.fakeProvider.HoldUserInfo = make(chan *http.Request)
s.fakeProvider.ReleaseUserInfo = make(chan struct{})
@@ -257,17 +395,12 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
defer wg.Done()
_, err := oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
c.Logf("concurrent req %d/%d", i, concurrent)
- var exp time.Time
creds, ok := auth.FromContext(ctx)
c.Assert(ok, check.Equals, true)
c.Assert(creds.Tokens, check.HasLen, 1)
c.Check(creds.Tokens[0], check.Equals, accessToken)
-
- err := db.QueryRowContext(ctx, `select expires_at at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
- c.Check(err, check.IsNil)
- c.Check(exp.Sub(time.Now()) > -time.Second, check.Equals, true)
- c.Check(exp.Sub(time.Now()) < time.Second, check.Equals, true)
+ exp := checkTokenInDB()
if i == 0 {
exp1 = exp
}
@@ -286,9 +419,7 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
// the expires_at value in the database.
time.Sleep(3 * time.Millisecond)
oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {
- var exp time.Time
- err := db.QueryRowContext(ctx, `select expires_at at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)
- c.Check(err, check.IsNil)
+ exp := checkTokenInDB()
c.Check(exp.Sub(exp1) > 0, check.Equals, true, check.Commentf("expect %v > 0", exp.Sub(exp1)))
c.Check(exp.Sub(exp1) < time.Second, check.Equals, true, check.Commentf("expect %v < 1s", exp.Sub(exp1)))
return nil, nil
@@ -296,7 +427,7 @@ func (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {
s.fakeProvider.AccessTokenPayload = map[string]interface{}{"scope": "openid profile foobar"}
accessToken = s.fakeProvider.ValidAccessToken()
- ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{accessToken}})
+ ctx = ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)
mac = hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))
io.WriteString(mac, accessToken)
@@ -399,7 +530,7 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
s.railsSpy.Close()
}
s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- s.localdb = NewConn(s.cluster)
+ s.localdb = NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
state := s.startLogin(c, func(form url.Values) {
@@ -462,7 +593,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {
// Try using the returned Arvados token.
c.Logf("trying an API call with new token %q", token)
- ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{token}})
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, token)
cl, err := s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})
c.Check(cl.ItemsAvailable, check.Not(check.Equals), 0)
c.Check(cl.Items, check.Not(check.HasLen), 0)
@@ -471,7 +602,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {
// Might as well check that bogus tokens aren't accepted.
badtoken := token + "plussomeboguschars"
c.Logf("trying an API call with mangled token %q", badtoken)
- ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{badtoken}})
+ ctx = ctrlctx.NewWithToken(s.ctx, s.cluster, badtoken)
cl, err = s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})
c.Check(cl.Items, check.HasLen, 0)
c.Check(err, check.NotNil)
@@ -613,10 +744,14 @@ func (s *OIDCLoginSuite) startLogin(c *check.C, checks ...func(url.Values)) (sta
// the provider, just grab state from the redirect URL.
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{ReturnTo: "https://app.example.com/foo?bar"})
c.Check(err, check.IsNil)
+ c.Check(resp.HTML.String(), check.Not(check.Matches), `(?ms).*error:.*`)
target, err := url.Parse(resp.RedirectLocation)
c.Check(err, check.IsNil)
state = target.Query().Get("state")
- c.Check(state, check.Not(check.Equals), "")
+ if !c.Check(state, check.Not(check.Equals), "") {
+ c.Logf("Redirect target: %q", target)
+ c.Logf("HTML: %q", resp.HTML)
+ }
for _, fn := range checks {
fn(target.Query())
}
@@ -624,6 +759,56 @@ func (s *OIDCLoginSuite) startLogin(c *check.C, checks ...func(url.Values)) (sta
return
}
+func (s *OIDCLoginSuite) TestValidateLoginRedirectTarget(c *check.C) {
+ for _, trial := range []struct {
+ permit bool
+ trustPrivate bool
+ url string
+ }{
+ // wb1, wb2 => accept
+ {true, false, s.cluster.Services.Workbench1.ExternalURL.String()},
+ {true, false, s.cluster.Services.Workbench2.ExternalURL.String()},
+ // explicitly listed host => accept
+ {true, false, "https://app.example.com/"},
+ {true, false, "https://app.example.com:443/foo?bar=baz"},
+ // non-listed hostname => deny (regardless of TrustPrivateNetworks)
+ {false, false, "https://bad.example/"},
+ {false, true, "https://bad.example/"},
+ // non-listed non-private IP addr => deny (regardless of TrustPrivateNetworks)
+		{false, false, "https://1.2.3.4/"},
+		{false, true, "https://1.2.3.4/"},
+ {false, true, "https://[ab::cd]:1234/"},
+ // localhost or non-listed private IP addr => accept only if TrustPrivateNetworks is set
+ {false, false, "https://localhost/"},
+ {true, true, "https://localhost/"},
+ {false, false, "https://[10.9.8.7]:80/foo"},
+ {true, true, "https://[10.9.8.7]:80/foo"},
+ {false, false, "https://[::1]:80/foo"},
+ {true, true, "https://[::1]:80/foo"},
+ {true, true, "http://192.168.1.1/"},
+ {true, true, "http://172.17.2.0/"},
+ // bad url => deny
+ {false, true, "https://10.1.1.1:blorp/foo"}, // non-numeric port
+ {false, true, "https://app.example.com:blorp/foo"}, // non-numeric port
+ {false, true, "https://]:443"},
+ {false, true, "https://"},
+ {false, true, "https:"},
+ {false, true, ""},
+ // explicitly listed host but different port, protocol, or user/pass => deny
+ {false, true, "http://app.example.com/"},
+ {false, true, "http://app.example.com:443/"},
+ {false, true, "https://app.example.com:80/"},
+ {false, true, "https://app.example.com:4433/"},
+ {false, true, "https://u:p@app.example.com:443/foo?bar=baz"},
+ } {
+ c.Logf("trial %+v", trial)
+ s.cluster.Login.TrustPrivateNetworks = trial.trustPrivate
+ err := validateLoginRedirectTarget(s.cluster, trial.url)
+ c.Check(err == nil, check.Equals, trial.permit)
+ }
+
+}
+
func getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {
for _, dump := range railsSpy.RequestDumps {
c.Logf("spied request: %q", dump)
diff --git a/lib/controller/localdb/login_pam.go b/lib/controller/localdb/login_pam.go
index 14e0a582c1..4669122543 100644
--- a/lib/controller/localdb/login_pam.go
+++ b/lib/controller/localdb/login_pam.go
@@ -57,6 +57,7 @@ func (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvad
if err != nil {
return arvados.APIClientAuthorization{}, err
}
+ // Check that the given credentials are valid.
err = tx.Authenticate(pam.DisallowNullAuthtok)
if err != nil {
err = fmt.Errorf("PAM: %s", err)
@@ -77,6 +78,15 @@ func (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvad
if errorMessage != "" {
return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(errorMessage), http.StatusUnauthorized)
}
+ // Check that the account/user is permitted to access this host.
+ err = tx.AcctMgmt(pam.DisallowNullAuthtok)
+ if err != nil {
+ err = fmt.Errorf("PAM: %s", err)
+ if errorMessage != "" {
+ err = fmt.Errorf("%s; %q", err, errorMessage)
+ }
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)
+ }
user, err := tx.GetItem(pam.User)
if err != nil {
return arvados.APIClientAuthorization{}, err
diff --git a/lib/controller/localdb/login_pam_test.go b/lib/controller/localdb/login_pam_test.go
index c5876bbfad..2c3fa4d0f7 100644
--- a/lib/controller/localdb/login_pam_test.go
+++ b/lib/controller/localdb/login_pam_test.go
@@ -5,44 +5,33 @@
package localdb
import (
- "context"
"io/ioutil"
"net/http"
"os"
"strings"
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&PamSuite{})
type PamSuite struct {
- cluster *arvados.Cluster
- ctrl *pamLoginController
- railsSpy *arvadostest.Proxy
+ localdbSuite
}
-func (s *PamSuite) SetUpSuite(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
+func (s *PamSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
s.cluster.Login.PAM.Enable = true
s.cluster.Login.PAM.DefaultEmailDomain = "example.com"
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- s.ctrl = &pamLoginController{
+ s.localdb.loginController = &pamLoginController{
Cluster: s.cluster,
- Parent: &Conn{railsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)},
+ Parent: s.localdb,
}
}
func (s *PamSuite) TestLoginFailure(c *check.C) {
- resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: "bogususername",
Password: "boguspassword",
})
@@ -57,6 +46,9 @@ func (s *PamSuite) TestLoginFailure(c *check.C) {
// This test only runs if the ARVADOS_TEST_PAM_CREDENTIALS_FILE env
// var is set. The credentials file should contain a valid username
// and password, separated by \n.
+//
+// Depending on the host config, this test succeeds only if the test
+// credentials are for the same account being used to run tests.
func (s *PamSuite) TestLoginSuccess(c *check.C) {
testCredsFile := os.Getenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE")
if testCredsFile == "" {
@@ -69,7 +61,7 @@ func (s *PamSuite) TestLoginSuccess(c *check.C) {
c.Assert(len(lines), check.Equals, 2, check.Commentf("credentials file %s should contain \"username\\npassword\"", testCredsFile))
u, p := lines[0], lines[1]
- resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: u,
Password: p,
})
diff --git a/lib/controller/localdb/login_test.go b/lib/controller/localdb/login_test.go
new file mode 100644
index 0000000000..5c8e92862f
--- /dev/null
+++ b/lib/controller/localdb/login_test.go
@@ -0,0 +1,75 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "encoding/json"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&loginSuite{})
+
+type loginSuite struct{}
+
+func (s *loginSuite) TestValidateLoginRedirectTarget(c *check.C) {
+ var cluster arvados.Cluster
+ for _, trial := range []struct {
+ pass bool
+ wb1 string
+ wb2 string
+ trusted string
+ target string
+ }{
+ {true, "https://wb1.example/", "https://wb2.example/", "", "https://wb2.example/"},
+ {true, "https://wb1.example:443/", "https://wb2.example:443/", "", "https://wb2.example/"},
+ {true, "https://wb1.example:443/", "https://wb2.example:443/", "", "https://wb2.example"},
+ {true, "https://wb1.example:443", "https://wb2.example:443", "", "https://wb2.example/"},
+ {true, "http://wb1.example:80/", "http://wb2.example:80/", "", "http://wb2.example/"},
+ {false, "https://wb1.example:80/", "https://wb2.example:80/", "", "https://wb2.example/"},
+ {false, "https://wb1.example:1234/", "https://wb2.example:1234/", "", "https://wb2.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "", "https://bad.wb2.example/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://good.wb2.example/", "https://good.wb2.example"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://good.wb2.example:443/", "https://good.wb2.example"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://good.wb2.example:443", "https://good.wb2.example/"},
+
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://ok.wildcard.example/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://ok.ok.wildcard.example/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://[ok.ok.wildcard.example]:443/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://[*.wildcard.example]:443", "https://ok.ok.wildcard.example/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example:443", "https://ok.wildcard.example/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://ok.wildcard.example:443/"},
+ {true, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example:443", "https://ok.wildcard.example:443/"},
+
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "http://wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "http://.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "http://wrongscheme.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "http://wrongscheme.wildcard.example:443/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://wrongport.wildcard.example:80/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://notmatching-wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "http://notmatching.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example:443", "https://attacker.example/ok.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://attacker.example/ok.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://attacker.example/?https://ok.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*.wildcard.example", "https://attacker.example/#https://ok.wildcard.example/"},
+ {false, "https://wb1.example/", "https://wb2.example/", "https://*-wildcard.example", "https://notsupported-wildcard.example/"},
+ } {
+ c.Logf("trial %+v", trial)
+ // We use json.Unmarshal() to load the test strings
+ // because we're testing behavior when the config file
+ // contains string X.
+ err := json.Unmarshal([]byte(`"`+trial.wb1+`"`), &cluster.Services.Workbench1.ExternalURL)
+ c.Assert(err, check.IsNil)
+ err = json.Unmarshal([]byte(`"`+trial.wb2+`"`), &cluster.Services.Workbench2.ExternalURL)
+ c.Assert(err, check.IsNil)
+ if trial.trusted != "" {
+ err = json.Unmarshal([]byte(`{"`+trial.trusted+`": {}}`), &cluster.Login.TrustedClients)
+ c.Assert(err, check.IsNil)
+ }
+ err = validateLoginRedirectTarget(&cluster, trial.target)
+ c.Check(err == nil, check.Equals, trial.pass)
+ }
+}
diff --git a/lib/controller/localdb/login_testuser_test.go b/lib/controller/localdb/login_testuser_test.go
index 51c2416f59..51dcaab9db 100644
--- a/lib/controller/localdb/login_testuser_test.go
+++ b/lib/controller/localdb/login_testuser_test.go
@@ -5,59 +5,30 @@
package localdb
import (
- "context"
"database/sql"
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/jmoiron/sqlx"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&TestUserSuite{})
type TestUserSuite struct {
- cluster *arvados.Cluster
- ctrl *testLoginController
- railsSpy *arvadostest.Proxy
- db *sqlx.DB
-
- // transaction context
- ctx context.Context
- tx *sqlx.Tx
+ localdbSuite
}
-func (s *TestUserSuite) SetUpSuite(c *check.C) {
- cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
- c.Assert(err, check.IsNil)
- s.cluster, err = cfg.GetCluster("")
- c.Assert(err, check.IsNil)
+func (s *TestUserSuite) SetUpTest(c *check.C) {
+ s.localdbSuite.SetUpTest(c)
s.cluster.Login.Test.Enable = true
s.cluster.Login.Test.Users = map[string]arvados.TestUser{
"valid": {Email: "valid@example.com", Password: "v@l1d"},
}
- s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- s.ctrl = &testLoginController{
+ s.localdb.loginController = &testLoginController{
Cluster: s.cluster,
- Parent: &Conn{railsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)},
+ Parent: s.localdb,
}
- s.db = arvadostest.DB(c, s.cluster)
-}
-
-func (s *TestUserSuite) SetUpTest(c *check.C) {
- tx, err := s.db.Beginx()
- c.Assert(err, check.IsNil)
- s.ctx = ctrlctx.NewWithTransaction(context.Background(), tx)
- s.tx = tx
-}
-
-func (s *TestUserSuite) TearDownTest(c *check.C) {
- s.tx.Rollback()
}
func (s *TestUserSuite) TestLogin(c *check.C) {
@@ -74,7 +45,7 @@ func (s *TestUserSuite) TestLogin(c *check.C) {
{true, "valid@example.com", "v@l1d"},
} {
c.Logf("=== %#v", trial)
- resp, err := s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
+ resp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: trial.username,
Password: trial.password,
})
@@ -94,7 +65,7 @@ func (s *TestUserSuite) TestLogin(c *check.C) {
}
func (s *TestUserSuite) TestLoginForm(c *check.C) {
- resp, err := s.ctrl.Login(s.ctx, arvados.LoginOptions{
+ resp, err := s.localdb.Login(s.ctx, arvados.LoginOptions{
ReturnTo: "https://localhost:12345/example",
})
c.Check(err, check.IsNil)
@@ -103,7 +74,8 @@ func (s *TestUserSuite) TestLoginForm(c *check.C) {
}
func (s *TestUserSuite) TestExpireTokenOnLogout(c *check.C) {
- returnTo := "https://localhost:12345/logout"
+ s.cluster.Login.TrustPrivateNetworks = true
+ returnTo := "https://[::1]:12345/logout"
for _, trial := range []struct {
requestToken string
expiringTokenUUID string
@@ -119,9 +91,7 @@ func (s *TestUserSuite) TestExpireTokenOnLogout(c *check.C) {
{"v2/some-fake-uuid/thisdoesntexistasatoken", "", false},
} {
c.Logf("=== %#v", trial)
- ctx := auth.NewContext(s.ctx, &auth.Credentials{
- Tokens: []string{trial.requestToken},
- })
+ ctx := ctrlctx.NewWithToken(s.ctx, s.cluster, trial.requestToken)
var tokenUUID string
var err error
@@ -132,7 +102,7 @@ func (s *TestUserSuite) TestExpireTokenOnLogout(c *check.C) {
c.Check(err, check.IsNil)
}
- resp, err := s.ctrl.Logout(ctx, arvados.LogoutOptions{
+ resp, err := s.localdb.Logout(ctx, arvados.LogoutOptions{
ReturnTo: returnTo,
})
c.Check(err, check.IsNil)
diff --git a/lib/controller/localdb/logout.go b/lib/controller/localdb/logout.go
index e1603f1448..04e7681ad7 100644
--- a/lib/controller/localdb/logout.go
+++ b/lib/controller/localdb/logout.go
@@ -33,6 +33,8 @@ func logout(ctx context.Context, cluster *arvados.Cluster, opts arvados.LogoutOp
} else {
target = cluster.Services.Workbench1.ExternalURL.String()
}
+ } else if err := validateLoginRedirectTarget(cluster, target); err != nil {
+ return arvados.LogoutResponse{}, httpserver.ErrorWithStatus(fmt.Errorf("invalid return_to parameter: %s", err), http.StatusBadRequest)
}
return arvados.LogoutResponse{RedirectLocation: target}, nil
}
diff --git a/lib/controller/localdb/testdata/dsa.pub b/lib/controller/localdb/testdata/dsa.pub
new file mode 100644
index 0000000000..8a2743d91d
--- /dev/null
+++ b/lib/controller/localdb/testdata/dsa.pub
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAIS5sFWjsFPK5yEa/TjXEEudJrBaFjQ6WvYLiJmh8AmCqWlC83ETv5gEFeIwJo8om8bat4n6l6IKkG4wDo7uxNN0lEWGnOBXatpWOcrJphb0PgYMstZnW7K5GBpTY52TDShx5OS5nvb9iJiQjd1/WQ63knmYoVZH3Ijhv6vDikL3AAAAFQDotNYD4D4IjS8BjJFk8qCGg1FWGQAAAIBlqZ/KwlJpJiekR2Yv+8k456kiFhPUasjeDqx+zGP//+0xNGx2yYzdkPlmvYrdG3YvRjA8KX5C+qJT9CfS1FMcY8/3cXWmDCxi3zKvaXjUcLk1nfVbhsPHdaebpSX3N+C6meehjoQIhYIgZghdPuWOgyGjwIavO9DYMlTGVhHRCgAAAIAjqJonYsmaSd3/0SoD2NGKBvRhngKcaTu63OLIY/V2kdg4Zrph7Ptx//S994rlhugLq68c0wnNoeq4vjVoRY8gDaCy8KXsk9Sq8THbxNseFeqa04txJJXe7g8/6nopfqrhi0NgpIyaNn/0BfqjWOErQuhzxhMqZ5if0aRi1k+g5A== tom@slab
diff --git a/lib/controller/localdb/testdata/ecdsa-sk.pub b/lib/controller/localdb/testdata/ecdsa-sk.pub
new file mode 100644
index 0000000000..9f18e6b65c
--- /dev/null
+++ b/lib/controller/localdb/testdata/ecdsa-sk.pub
@@ -0,0 +1 @@
+sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBFj1zodcmSKWeUgNxzDOv7m9TeLhNRb64wa9oQwQK4tFZzLQRgcsmaVQmMx/ZbY+ThZbHLHSpKRxaByINu99NKUAAAAEc3NoOg== tom@slab
diff --git a/lib/controller/localdb/testdata/ecdsa.pub b/lib/controller/localdb/testdata/ecdsa.pub
new file mode 100644
index 0000000000..b34e821e6f
--- /dev/null
+++ b/lib/controller/localdb/testdata/ecdsa.pub
@@ -0,0 +1 @@
+ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDLajzRPnSI3FBChDvvNJyIBPdyA/nC7GWFWwizK93XL8HkQ5+X6D/xaqowq6iIPq/XHSdbZ3ebdb0OH81ovrCQ= tom@slab
diff --git a/lib/controller/localdb/testdata/ed25519-sk.pub b/lib/controller/localdb/testdata/ed25519-sk.pub
new file mode 100644
index 0000000000..0aa08f57bd
--- /dev/null
+++ b/lib/controller/localdb/testdata/ed25519-sk.pub
@@ -0,0 +1 @@
+sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIJMteBo9BvwQTeiBq4FvS4qJ83YjoCvKrH6EnvrOCILmAAAABHNzaDo= test key
diff --git a/lib/controller/localdb/testdata/ed25519.pub b/lib/controller/localdb/testdata/ed25519.pub
new file mode 100644
index 0000000000..ffcde15401
--- /dev/null
+++ b/lib/controller/localdb/testdata/ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElzlGk8QUevhJQ2mhf8p73lUAh044icWqssl3bMoCaT tom@slab
diff --git a/lib/controller/localdb/testdata/generate b/lib/controller/localdb/testdata/generate
new file mode 100755
index 0000000000..d39d72a91d
--- /dev/null
+++ b/lib/controller/localdb/testdata/generate
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This script uses ssh-keygen to generate an example public key for
+# each supported type, to be used by test cases. Private keys are
+# discarded. If ${keytype}.pub already exists, it is left alone.
+
+set -e
+
+err=
+keytypes=$(ssh-keygen -_ 2>&1 | grep -- -t | tr -d '[|]' | tr ' ' '\n' | grep -vw t)
+for keytype in ${keytypes[@]}; do
+ if [[ ! -e "./${keytype}.pub" ]]; then
+ if ssh-keygen -t "${keytype}" -f "./${keytype}" -N ""; then
+ # discard private key
+ rm "./${keytype}"
+ else
+ echo >&2 "ssh-keygen -t ${keytype} failed"
+ err=1
+ fi
+ fi
+done
+exit $err
diff --git a/lib/controller/localdb/testdata/rsa.pub b/lib/controller/localdb/testdata/rsa.pub
new file mode 100644
index 0000000000..4b5ab75ec7
--- /dev/null
+++ b/lib/controller/localdb/testdata/rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCtlBJsNterzUR26k/3tbXi2LViRj0vPyyJ7msqyGtRjJKuMqZkVJz6GN42/+aESeHfJw9FNlwW4oMa3Z4BB5llvZSG8yhY1HXbBlK5sURjSo9tid/U+PlKPGqteiXTguXLj5PAwoAoQ4JnGKR/+YphWxuWy+VR4toLcuKG9pX5d6iwkmWU1/smUnF6+vq38Xrhv94EpeNmyTEPC6OijDdmcas3rwDGW/I2Vij/Bxdj9DY/tHLv9V+yznbV1YB9yxda0YeIGMa2d35dOIxBeWmXzAGczVNQeXE7ooFOH6zCyoJZ4HH/AhAZ9GHyNGsf72CM+WkTBUEYmBmRIDHtMXY32KxyreRWUU1l47md5gefkb4c57OI369AQed154SVQaoiiVqIXinXGGezmfa09nnaSelD54Hky71GC/qqMvzkv7pXkETB37hYC2z2NixXQ6pf21vRHZLAtA8LK9OB5yxdr9b5buMIdTLViKufr3pPk8bcJrlB7tilw5X/PUioWws= tom@slab
diff --git a/lib/controller/proxy.go b/lib/controller/proxy.go
index 47b8cb4711..26d1859ec8 100644
--- a/lib/controller/proxy.go
+++ b/lib/controller/proxy.go
@@ -45,6 +45,11 @@ var dropHeaders = map[string]bool{
// Content-Length depends on encoding.
"Content-Length": true,
+
+ // Defend against Rails vulnerability CVE-2023-22795 -
+ // we don't use this functionality anyway, so it costs us nothing.
+ //
+ "If-None-Match": true,
}
type ResponseFilter func(*http.Response, error) (*http.Response, error)
diff --git a/lib/controller/rails_restart_test.go b/lib/controller/rails_restart_test.go
index 5db37c4b8a..e3267c220f 100644
--- a/lib/controller/rails_restart_test.go
+++ b/lib/controller/rails_restart_test.go
@@ -79,9 +79,11 @@ func (s *railsRestartSuite) TestConfigReload(c *check.C) {
for deadline := time.Now().Add(20 * time.Second); time.Now().Before(deadline); time.Sleep(time.Second) {
resp, err = hc.Do(req)
c.Assert(err, check.IsNil)
+ defer resp.Body.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
body, err = ioutil.ReadAll(resp.Body)
c.Assert(err, check.IsNil)
+ resp.Body.Close()
if strings.Contains(string(body), newhash) {
break
}
diff --git a/lib/controller/router/request.go b/lib/controller/router/request.go
index 31f2e1d7ba..68fffa0681 100644
--- a/lib/controller/router/request.go
+++ b/lib/controller/router/request.go
@@ -55,13 +55,25 @@ func guessAndParse(k, v string) (interface{}, error) {
// foo=["bar","baz"]?
}
-// Parse req as an Arvados V1 API request and return the request
-// parameters.
+// Return a map of incoming HTTP request parameters. Also load
+// parameters into opts, unless opts is nil.
//
// If the request has a parameter whose name is attrsKey (e.g.,
// "collection"), it is renamed to "attrs".
-func (rtr *router) loadRequestParams(req *http.Request, attrsKey string) (map[string]interface{}, error) {
+func (rtr *router) loadRequestParams(req *http.Request, attrsKey string, opts interface{}) (map[string]interface{}, error) {
+ // Here we call ParseForm and ParseMultipartForm explicitly
+ // (even though ParseMultipartForm calls ParseForm if
+ // necessary) to ensure we catch errors encountered in
+ // ParseForm. In the non-multipart-form case,
+ // ParseMultipartForm returns ErrNotMultipart and hides the
+ // ParseForm error.
err := req.ParseForm()
+ if err == nil {
+ err = req.ParseMultipartForm(int64(rtr.config.MaxRequestSize))
+ if err == http.ErrNotMultipart {
+ err = nil
+ }
+ }
if err != nil {
if err.Error() == "http: request body too large" {
return nil, httpError(http.StatusRequestEntityTooLarge, err)
@@ -141,6 +153,24 @@ func (rtr *router) loadRequestParams(req *http.Request, attrsKey string) (map[st
}
}
+ if opts != nil {
+ // Load all path, query, and form params into opts.
+ err = rtr.transcode(params, opts)
+ if err != nil {
+ return nil, fmt.Errorf("transcode: %w", err)
+ }
+
+ // Special case: if opts has Method or Header fields, load the
+ // request method/header.
+ err = rtr.transcode(struct {
+ Method string
+ Header http.Header
+ }{req.Method, req.Header}, opts)
+ if err != nil {
+ return nil, fmt.Errorf("transcode: %w", err)
+ }
+ }
+
return params, nil
}
diff --git a/lib/controller/router/request_test.go b/lib/controller/router/request_test.go
index 4544a6bb65..b689eb681f 100644
--- a/lib/controller/router/request_test.go
+++ b/lib/controller/router/request_test.go
@@ -8,10 +8,12 @@ import (
"bytes"
"encoding/json"
"io"
+ "mime/multipart"
"net/http"
"net/http/httptest"
"net/url"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
)
@@ -116,7 +118,7 @@ func (tr *testReq) Request() *http.Request {
}
if tr.json {
req.Header.Set("Content-Type", "application/json")
- } else {
+ } else if tr.header.Get("Content-Type") == "" {
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
for k, v := range tr.header {
@@ -131,20 +133,30 @@ func (tr *testReq) bodyContent() string {
func (s *RouterSuite) TestAttrsInBody(c *check.C) {
attrs := map[string]interface{}{"foo": "bar"}
+
+ multipartBody := new(bytes.Buffer)
+ multipartWriter := multipart.NewWriter(multipartBody)
+ multipartWriter.WriteField("attrs", `{"foo":"bar"}`)
+ multipartWriter.Close()
+
for _, tr := range []testReq{
{attrsKey: "model_name", json: true, attrs: attrs},
{attrsKey: "model_name", json: true, attrs: attrs, jsonAttrsTop: true},
{attrsKey: "model_name", json: true, attrs: attrs, jsonAttrsTop: true, jsonStringParam: true},
{attrsKey: "model_name", json: true, attrs: attrs, jsonAttrsTop: false, jsonStringParam: true},
+ {body: multipartBody, header: http.Header{"Content-Type": []string{multipartWriter.FormDataContentType()}}},
} {
c.Logf("tr: %#v", tr)
req := tr.Request()
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+ var opts struct{ Attrs struct{ Foo string } }
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Logf("params: %#v", params)
c.Assert(err, check.IsNil)
c.Check(params, check.NotNil)
- c.Assert(params["attrs"], check.FitsTypeOf, map[string]interface{}{})
- c.Check(params["attrs"].(map[string]interface{})["foo"], check.Equals, "bar")
+ c.Check(opts.Attrs.Foo, check.Equals, "bar")
+ if c.Check(params["attrs"], check.FitsTypeOf, map[string]interface{}{}) {
+ c.Check(params["attrs"].(map[string]interface{})["foo"], check.Equals, "bar")
+ }
}
}
@@ -161,11 +173,14 @@ func (s *RouterSuite) TestBoolParam(c *check.C) {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
c.Logf("tr.body: %s", tr.bodyContent())
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+ var opts struct{ EnsureUniqueName bool }
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Logf("params: %#v", params)
c.Assert(err, check.IsNil)
- c.Check(params, check.NotNil)
- c.Check(params[testKey], check.Equals, false)
+ c.Check(opts.EnsureUniqueName, check.Equals, false)
+ if c.Check(params, check.NotNil) {
+ c.Check(params[testKey], check.Equals, false)
+ }
}
for i, tr := range []testReq{
@@ -177,11 +192,16 @@ func (s *RouterSuite) TestBoolParam(c *check.C) {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
c.Logf("tr.body: %s", tr.bodyContent())
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+ var opts struct {
+ EnsureUniqueName bool `json:"ensure_unique_name"`
+ }
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Logf("params: %#v", params)
c.Assert(err, check.IsNil)
- c.Check(params, check.NotNil)
- c.Check(params[testKey], check.Equals, true)
+ c.Check(opts.EnsureUniqueName, check.Equals, true)
+ if c.Check(params, check.NotNil) {
+ c.Check(params[testKey], check.Equals, true)
+ }
}
}
@@ -196,7 +216,7 @@ func (s *RouterSuite) TestOrderParam(c *check.C) {
} {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, nil)
c.Assert(err, check.IsNil)
c.Assert(params, check.NotNil)
if order, ok := params["order"]; ok && order != nil {
@@ -213,8 +233,10 @@ func (s *RouterSuite) TestOrderParam(c *check.C) {
} {
c.Logf("#%d, tr: %#v", i, tr)
req := tr.Request()
- params, err := s.rtr.loadRequestParams(req, tr.attrsKey)
+ var opts arvados.ListOptions
+ params, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)
c.Assert(err, check.IsNil)
+ c.Check(opts.Order, check.DeepEquals, []string{"foo", "bar desc"})
if _, ok := params["order"].([]string); ok {
c.Check(params["order"], check.DeepEquals, []string{"foo", "bar desc"})
} else {
diff --git a/lib/controller/router/router.go b/lib/controller/router/router.go
index 80d5e92985..054bcffaf7 100644
--- a/lib/controller/router/router.go
+++ b/lib/controller/router/router.go
@@ -86,6 +86,41 @@ func (rtr *router) addRoutes() {
return rtr.backend.Logout(ctx, *opts.(*arvados.LogoutOptions))
},
},
+ {
+ arvados.EndpointAuthorizedKeyCreate,
+ func() interface{} { return &arvados.CreateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.AuthorizedKeyCreate(ctx, *opts.(*arvados.CreateOptions))
+ },
+ },
+ {
+ arvados.EndpointAuthorizedKeyUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.AuthorizedKeyUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
+ {
+ arvados.EndpointAuthorizedKeyGet,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.AuthorizedKeyGet(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointAuthorizedKeyList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.AuthorizedKeyList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
+ {
+ arvados.EndpointAuthorizedKeyDelete,
+ func() interface{} { return &arvados.DeleteOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.AuthorizedKeyDelete(ctx, *opts.(*arvados.DeleteOptions))
+ },
+ },
{
arvados.EndpointCollectionCreate,
func() interface{} { return &arvados.CreateOptions{} },
@@ -156,6 +191,13 @@ func (rtr *router) addRoutes() {
return rtr.backend.ContainerCreate(ctx, *opts.(*arvados.CreateOptions))
},
},
+ {
+ arvados.EndpointContainerPriorityUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerPriorityUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
{
arvados.EndpointContainerUpdate,
func() interface{} { return &arvados.UpdateOptions{} },
@@ -184,41 +226,6 @@ func (rtr *router) addRoutes() {
return rtr.backend.ContainerDelete(ctx, *opts.(*arvados.DeleteOptions))
},
},
- {
- arvados.EndpointContainerRequestCreate,
- func() interface{} { return &arvados.CreateOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.ContainerRequestCreate(ctx, *opts.(*arvados.CreateOptions))
- },
- },
- {
- arvados.EndpointContainerRequestUpdate,
- func() interface{} { return &arvados.UpdateOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.ContainerRequestUpdate(ctx, *opts.(*arvados.UpdateOptions))
- },
- },
- {
- arvados.EndpointContainerRequestGet,
- func() interface{} { return &arvados.GetOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.ContainerRequestGet(ctx, *opts.(*arvados.GetOptions))
- },
- },
- {
- arvados.EndpointContainerRequestList,
- func() interface{} { return &arvados.ListOptions{Limit: -1} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.ContainerRequestList(ctx, *opts.(*arvados.ListOptions))
- },
- },
- {
- arvados.EndpointContainerRequestDelete,
- func() interface{} { return &arvados.DeleteOptions{} },
- func(ctx context.Context, opts interface{}) (interface{}, error) {
- return rtr.backend.ContainerRequestDelete(ctx, *opts.(*arvados.DeleteOptions))
- },
- },
{
arvados.EndpointContainerLock,
func() interface{} {
@@ -244,6 +251,13 @@ func (rtr *router) addRoutes() {
return rtr.backend.ContainerSSH(ctx, *opts.(*arvados.ContainerSSHOptions))
},
},
+ {
+ arvados.EndpointContainerSSHCompat,
+ func() interface{} { return &arvados.ContainerSSHOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerSSH(ctx, *opts.(*arvados.ContainerSSHOptions))
+ },
+ },
{
// arvados-client built before commit
// bdc29d3129f6d75aa9ce0a24ffb849a272b06f08
@@ -262,6 +276,62 @@ func (rtr *router) addRoutes() {
return rtr.backend.ContainerGatewayTunnel(ctx, *opts.(*arvados.ContainerGatewayTunnelOptions))
},
},
+ {
+ arvados.EndpointContainerGatewayTunnelCompat,
+ func() interface{} { return &arvados.ContainerGatewayTunnelOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerGatewayTunnel(ctx, *opts.(*arvados.ContainerGatewayTunnelOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestCreate,
+ func() interface{} { return &arvados.CreateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestCreate(ctx, *opts.(*arvados.CreateOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestGet,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestGet(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestDelete,
+ func() interface{} { return &arvados.DeleteOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestDelete(ctx, *opts.(*arvados.DeleteOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestContainerStatus,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestContainerStatus(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointContainerRequestLog,
+ func() interface{} { return &arvados.ContainerLogOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.ContainerRequestLog(ctx, *opts.(*arvados.ContainerLogOptions))
+ },
+ },
{
arvados.EndpointGroupCreate,
func() interface{} { return &arvados.CreateOptions{} },
@@ -367,6 +437,41 @@ func (rtr *router) addRoutes() {
return rtr.backend.LinkDelete(ctx, *opts.(*arvados.DeleteOptions))
},
},
+ {
+ arvados.EndpointLogCreate,
+ func() interface{} { return &arvados.CreateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LogCreate(ctx, *opts.(*arvados.CreateOptions))
+ },
+ },
+ {
+ arvados.EndpointLogUpdate,
+ func() interface{} { return &arvados.UpdateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LogUpdate(ctx, *opts.(*arvados.UpdateOptions))
+ },
+ },
+ {
+ arvados.EndpointLogList,
+ func() interface{} { return &arvados.ListOptions{Limit: -1} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LogList(ctx, *opts.(*arvados.ListOptions))
+ },
+ },
+ {
+ arvados.EndpointLogGet,
+ func() interface{} { return &arvados.GetOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LogGet(ctx, *opts.(*arvados.GetOptions))
+ },
+ },
+ {
+ arvados.EndpointLogDelete,
+ func() interface{} { return &arvados.DeleteOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.backend.LogDelete(ctx, *opts.(*arvados.DeleteOptions))
+ },
+ },
{
arvados.EndpointSpecimenCreate,
func() interface{} { return &arvados.CreateOptions{} },
@@ -543,9 +648,23 @@ func (rtr *router) addRoutes() {
rtr.addRoute(route.endpoint, route.defaultOpts, exec)
}
rtr.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if req.Method == "OPTIONS" {
+ // For non-webdav endpoints, return an empty
+ // response with the CORS headers we already
+ // added in ServeHTTP.
+ w.WriteHeader(http.StatusOK)
+ return
+ }
httpserver.Errors(w, []string{"API endpoint not found"}, http.StatusNotFound)
})
rtr.mux.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if req.Method == "OPTIONS" {
+ // For non-webdav endpoints, return an empty
+ // response with the CORS headers we already
+ // added in ServeHTTP.
+ w.WriteHeader(http.StatusOK)
+ return
+ }
httpserver.Errors(w, []string{"API endpoint not found"}, http.StatusMethodNotAllowed)
})
}
@@ -560,9 +679,14 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
if alt, ok := altMethod[endpoint.Method]; ok {
methods = append(methods, alt)
}
+ if strings.HasSuffix(endpoint.Path, ".*}") {
+ // webdav methods
+ methods = append(methods, "OPTIONS", "PROPFIND")
+ }
rtr.mux.Methods(methods...).Path("/" + endpoint.Path).HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
logger := ctxlog.FromContext(req.Context())
- params, err := rtr.loadRequestParams(req, endpoint.AttrsKey)
+ opts := defaultOpts()
+ params, err := rtr.loadRequestParams(req, endpoint.AttrsKey, opts)
if err != nil {
logger.WithFields(logrus.Fields{
"req": req,
@@ -572,13 +696,6 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
rtr.sendError(w, err)
return
}
- opts := defaultOpts()
- err = rtr.transcode(params, opts)
- if err != nil {
- logger.WithField("params", params).WithError(err).Debugf("error transcoding params to %T", opts)
- rtr.sendError(w, err)
- return
- }
respOpts, err := rtr.responseOptions(opts)
if err != nil {
logger.WithField("opts", opts).WithError(err).Debugf("error getting response options from %T", opts)
@@ -601,11 +718,8 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
}
ctx := auth.NewContext(req.Context(), creds)
ctx = arvados.ContextWithRequestID(ctx, req.Header.Get("X-Request-Id"))
- logger.WithFields(logrus.Fields{
- "apiEndpoint": endpoint,
- "apiOptsType": fmt.Sprintf("%T", opts),
- "apiOpts": opts,
- }).Debug("exec")
+ req = req.WithContext(ctx)
+
// Extract the token UUIDs (or a placeholder for v1 tokens)
var tokenUUIDs []string
for _, t := range creds.Tokens {
@@ -622,7 +736,13 @@ func (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() int
tokenUUIDs = append(tokenUUIDs, "v1 token ending in "+end)
}
}
- httpserver.SetResponseLogFields(req.Context(), logrus.Fields{"tokenUUIDs": tokenUUIDs})
+ httpserver.SetResponseLogFields(ctx, logrus.Fields{"tokenUUIDs": tokenUUIDs})
+
+ logger.WithFields(logrus.Fields{
+ "apiEndpoint": endpoint,
+ "apiOptsType": fmt.Sprintf("%T", opts),
+ "apiOpts": opts,
+ }).Debug("exec")
resp, err := exec(ctx, opts)
if err != nil {
logger.WithError(err).Debugf("returning error type %T", err)
@@ -638,13 +758,11 @@ func (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case "login", "logout", "auth":
default:
w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, PUT, POST, PATCH, DELETE")
- w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-Http-Method-Override")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS, PROPFIND, PUT, POST, PATCH, DELETE")
+ w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Range, X-Http-Method-Override")
+ w.Header().Set("Access-Control-Expose-Headers", "Content-Range")
w.Header().Set("Access-Control-Max-Age", "86486400")
}
- if r.Method == "OPTIONS" {
- return
- }
if r.Body != nil {
// Wrap r.Body in a http.MaxBytesReader(), otherwise
// r.ParseForm() uses a default max request body size
diff --git a/lib/controller/router/router_test.go b/lib/controller/router/router_test.go
index 11b090a214..a8359a4400 100644
--- a/lib/controller/router/router_test.go
+++ b/lib/controller/router/router_test.go
@@ -47,14 +47,15 @@ func (s *RouterSuite) SetUpTest(c *check.C) {
func (s *RouterSuite) TestOptions(c *check.C) {
token := arvadostest.ActiveToken
for _, trial := range []struct {
- comment string // unparsed -- only used to help match test failures to trials
- method string
- path string
- header http.Header
- body string
- shouldStatus int // zero value means 200
- shouldCall string
- withOptions interface{}
+ comment string // unparsed -- only used to help match test failures to trials
+ method string
+ path string
+ header http.Header
+ body string
+ unauthenticated bool
+ shouldStatus int // zero value means 200
+ shouldCall string
+ withOptions interface{}
}{
{
method: "GET",
@@ -174,6 +175,114 @@ func (s *RouterSuite) TestOptions(c *check.C) {
path: "/arvados/v1/collections",
shouldStatus: http.StatusMethodNotAllowed,
},
+ {
+ comment: "container log webdav GET root",
+ method: "GET",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "/",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "GET",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID + "/"}},
+ },
+ {
+ comment: "container log webdav GET root without trailing slash",
+ method: "GET",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "GET",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID}},
+ },
+ {
+ comment: "container log webdav OPTIONS root",
+ method: "OPTIONS",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "/",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "OPTIONS",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID + "/"}},
+ },
+ {
+ comment: "container log webdav OPTIONS root without trailing slash",
+ method: "OPTIONS",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID,
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "OPTIONS",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID}},
+ },
+ {
+ comment: "container log webdav OPTIONS for CORS",
+ unauthenticated: true,
+ method: "OPTIONS",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "/",
+ header: http.Header{"Access-Control-Request-Method": {"POST"}},
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "OPTIONS",
+ Header: http.Header{
+ "Access-Control-Request-Method": {"POST"},
+ },
+ Path: "/" + arvadostest.CompletedContainerUUID + "/"}},
+ },
+ {
+ comment: "container log webdav PROPFIND root",
+ method: "PROPFIND",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "/",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "PROPFIND",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID + "/"}},
+ },
+ {
+ comment: "container log webdav PROPFIND root without trailing slash",
+ method: "PROPFIND",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "PROPFIND",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID}},
+ },
+ {
+ comment: "container log webdav no_forward=true",
+ method: "GET",
+ path: "/arvados/v1/container_requests/" + arvadostest.CompletedContainerRequestUUID + "/log/" + arvadostest.CompletedContainerUUID + "/?no_forward=true",
+ shouldCall: "ContainerRequestLog",
+ withOptions: arvados.ContainerLogOptions{
+ UUID: arvadostest.CompletedContainerRequestUUID,
+ NoForward: true,
+ WebDAVOptions: arvados.WebDAVOptions{
+ Method: "GET",
+ Header: http.Header{"Authorization": {"Bearer " + arvadostest.ActiveToken}},
+ Path: "/" + arvadostest.CompletedContainerUUID + "/"}},
+ },
+ {
+ comment: "/logX does not route to ContainerRequestLog",
+ method: "GET",
+ path: "/arvados/v1/containers/" + arvadostest.CompletedContainerRequestUUID + "/logX",
+ shouldStatus: http.StatusNotFound,
+ shouldCall: "",
+ },
} {
// Reset calls captured in previous trial
s.stub = arvadostest.APIStub{}
@@ -181,7 +290,7 @@ func (s *RouterSuite) TestOptions(c *check.C) {
c.Logf("trial: %+v", trial)
comment := check.Commentf("trial comment: %s", trial.comment)
- _, rr, _ := doRequest(c, s.rtr, token, trial.method, trial.path, trial.header, bytes.NewBufferString(trial.body))
+ _, rr := doRequest(c, s.rtr, token, trial.method, trial.path, !trial.unauthenticated, trial.header, bytes.NewBufferString(trial.body), nil)
if trial.shouldStatus == 0 {
c.Check(rr.Code, check.Equals, http.StatusOK, comment)
} else {
@@ -222,7 +331,8 @@ func (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {
token := arvadostest.ActiveTokenV2
// Check "get collection" response has "kind" key
- _, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, nil, bytes.NewBufferString(`{"include_trash":true}`))
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, true, nil, bytes.NewBufferString(`{"include_trash":true}`), jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items"], check.FitsTypeOf, []interface{}{})
c.Check(jresp["kind"], check.Equals, "arvados#collectionList")
@@ -236,7 +346,8 @@ func (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {
`,"select":["name"]`,
`,"select":["uuid"]`,
} {
- _, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, nil, bytes.NewBufferString(`{"where":{"uuid":["`+arvadostest.FooCollection+`"]}`+selectj+`}`))
+ jresp := map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections`, true, nil, bytes.NewBufferString(`{"where":{"uuid":["`+arvadostest.FooCollection+`"]}`+selectj+`}`), jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items"], check.FitsTypeOf, []interface{}{})
c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
@@ -261,7 +372,8 @@ func (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {
}
// Check "create collection" response has "kind" key
- _, rr, jresp = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, http.Header{"Content-Type": {"application/x-www-form-urlencoded"}}, bytes.NewBufferString(`ensure_unique_name=true`))
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, true, http.Header{"Content-Type": {"application/x-www-form-urlencoded"}}, bytes.NewBufferString(`ensure_unique_name=true`), jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["uuid"], check.FitsTypeOf, "")
c.Check(jresp["kind"], check.Equals, "arvados#collection")
@@ -286,11 +398,11 @@ func (s *RouterIntegrationSuite) TestMaxRequestSize(c *check.C) {
hdr := http.Header{"Content-Type": {"application/x-www-form-urlencoded"}}
body := bytes.NewBufferString(url.Values{"foo_bar": {okstr}}.Encode())
- _, rr, _ := doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, hdr, body)
+ _, rr := doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, true, hdr, body, nil)
c.Check(rr.Code, check.Equals, http.StatusOK)
body = bytes.NewBufferString(url.Values{"foo_bar": {okstr + okstr}}.Encode())
- _, rr, _ = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, hdr, body)
+ _, rr = doRequest(c, s.rtr, token, "POST", `/arvados/v1/collections`, true, hdr, body, nil)
c.Check(rr.Code, check.Equals, http.StatusRequestEntityTooLarge)
}
}
@@ -298,20 +410,23 @@ func (s *RouterIntegrationSuite) TestMaxRequestSize(c *check.C) {
func (s *RouterIntegrationSuite) TestContainerList(c *check.C) {
token := arvadostest.ActiveTokenV2
- _, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=0`, nil, nil)
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=0`, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
c.Check(jresp["items"], check.NotNil)
c.Check(jresp["items"], check.HasLen, 0)
- _, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?filters=[["uuid","in",[]]]`, nil, nil)
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?filters=[["uuid","in",[]]]`, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items_available"], check.Equals, float64(0))
c.Check(jresp["items"], check.NotNil)
c.Check(jresp["items"], check.HasLen, 0)
- _, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=2&select=["uuid","command"]`, nil, nil)
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers?limit=2&select=["uuid","command"]`, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
@@ -322,7 +437,8 @@ func (s *RouterIntegrationSuite) TestContainerList(c *check.C) {
c.Check(item0["command"].([]interface{})[0], check.FitsTypeOf, "")
c.Check(item0["mounts"], check.IsNil)
- _, rr, jresp = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers`, nil, nil)
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "GET", `/arvados/v1/containers`, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["items_available"], check.FitsTypeOf, float64(0))
c.Check(jresp["items_available"].(float64) > 2, check.Equals, true)
@@ -338,25 +454,33 @@ func (s *RouterIntegrationSuite) TestContainerList(c *check.C) {
func (s *RouterIntegrationSuite) TestContainerLock(c *check.C) {
uuid := arvadostest.QueuedContainerUUID
token := arvadostest.AdminToken
- _, rr, jresp := doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", nil, nil)
+
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["uuid"], check.HasLen, 27)
c.Check(jresp["state"], check.Equals, "Locked")
- _, rr, _ = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", nil, nil)
+
+ _, rr = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/lock", true, nil, nil, nil)
c.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)
c.Check(rr.Body.String(), check.Not(check.Matches), `.*"uuid":.*`)
- _, rr, jresp = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", nil, nil)
+
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["uuid"], check.HasLen, 27)
c.Check(jresp["state"], check.Equals, "Queued")
c.Check(jresp["environment"], check.IsNil)
- _, rr, jresp = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", nil, nil)
+
+ jresp = map[string]interface{}{}
+ _, rr = doRequest(c, s.rtr, token, "POST", "/arvados/v1/containers/"+uuid+"/unlock", true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)
c.Check(jresp["uuid"], check.IsNil)
}
func (s *RouterIntegrationSuite) TestWritableBy(c *check.C) {
- _, rr, jresp := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, "GET", `/arvados/v1/users/`+arvadostest.ActiveUserUUID, nil, nil)
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, "GET", `/arvados/v1/users/`+arvadostest.ActiveUserUUID, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["writable_by"], check.DeepEquals, []interface{}{"zzzzz-tpzed-000000000000000", "zzzzz-tpzed-xurymjxw79nv3jz", "zzzzz-j7d0g-48foin4vonvc2at"})
}
@@ -365,7 +489,8 @@ func (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {
uuid := arvadostest.CollectionReplicationDesired2Confirmed2UUID
token := arvadostest.ActiveTokenV2
- _, rr, jresp := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections/`+uuid, nil, nil)
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", `/arvados/v1/collections/`+uuid, true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
c.Check(jresp["uuid"], check.Equals, uuid)
expectNS := map[string]int{
@@ -392,14 +517,15 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
} {
j, err := json.Marshal(sel)
c.Assert(err, check.IsNil)
- _, rr, resp := doRequest(c, s.rtr, token, "GET", "/arvados/v1/containers/"+uuid+"?select="+string(j), nil, nil)
+ jresp := map[string]interface{}{}
+ _, rr := doRequest(c, s.rtr, token, "GET", "/arvados/v1/containers/"+uuid+"?select="+string(j), true, nil, nil, jresp)
c.Check(rr.Code, check.Equals, http.StatusOK)
- c.Check(resp["kind"], check.Equals, "arvados#container")
- c.Check(resp["uuid"], check.HasLen, 27)
- c.Check(resp["command"], check.HasLen, 2)
- c.Check(resp["mounts"], check.IsNil)
- _, hasMounts := resp["mounts"]
+ c.Check(jresp["kind"], check.Equals, "arvados#container")
+ c.Check(jresp["uuid"], check.HasLen, 27)
+ c.Check(jresp["command"], check.HasLen, 2)
+ c.Check(jresp["mounts"], check.IsNil)
+ _, hasMounts := jresp["mounts"]
c.Check(hasMounts, check.Equals, false)
}
// POST & PUT
@@ -409,23 +535,23 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
for _, method := range []string{"PUT", "POST"} {
desc := "Today is " + time.Now().String()
reqBody := "{\"description\":\"" + desc + "\"}"
- var resp map[string]interface{}
+ jresp := map[string]interface{}{}
var rr *httptest.ResponseRecorder
if method == "PUT" {
- _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+ _, rr = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), true, nil, bytes.NewReader([]byte(reqBody)), jresp)
} else {
- _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+ _, rr = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), true, nil, bytes.NewReader([]byte(reqBody)), jresp)
}
c.Check(rr.Code, check.Equals, http.StatusOK)
- c.Check(resp["kind"], check.Equals, "arvados#collection")
- c.Check(resp["uuid"], check.HasLen, 27)
- c.Check(resp["description"], check.Equals, desc)
- c.Check(resp["manifest_text"], check.IsNil)
+ c.Check(jresp["kind"], check.Equals, "arvados#collection")
+ c.Check(jresp["uuid"], check.HasLen, 27)
+ c.Check(jresp["description"], check.Equals, desc)
+ c.Check(jresp["manifest_text"], check.IsNil)
}
}
func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {
- _, rr, _ := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, "HEAD", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, nil)
+ _, rr := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, "HEAD", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, true, nil, nil, nil)
c.Check(rr.Code, check.Equals, http.StatusOK)
}
@@ -497,17 +623,24 @@ func (s *RouterIntegrationSuite) TestCORS(c *check.C) {
}
}
-func doRequest(c *check.C, rtr http.Handler, token, method, path string, hdrs http.Header, body io.Reader) (*http.Request, *httptest.ResponseRecorder, map[string]interface{}) {
+func doRequest(c *check.C, rtr http.Handler, token, method, path string, auth bool, hdrs http.Header, body io.Reader, jresp map[string]interface{}) (*http.Request, *httptest.ResponseRecorder) {
req := httptest.NewRequest(method, path, body)
for k, v := range hdrs {
req.Header[k] = v
}
- req.Header.Set("Authorization", "Bearer "+token)
+ if auth {
+ req.Header.Set("Authorization", "Bearer "+token)
+ }
rr := httptest.NewRecorder()
rtr.ServeHTTP(rr, req)
- c.Logf("response body: %s", rr.Body.String())
- var jresp map[string]interface{}
- err := json.Unmarshal(rr.Body.Bytes(), &jresp)
- c.Check(err, check.IsNil)
- return req, rr, jresp
+ respbody := rr.Body.String()
+ if len(respbody) > 10000 {
+ respbody = respbody[:10000] + "[...]"
+ }
+ c.Logf("response body: %s", respbody)
+ if jresp != nil {
+ err := json.Unmarshal(rr.Body.Bytes(), &jresp)
+ c.Check(err, check.IsNil)
+ }
+ return req, rr
}
diff --git a/lib/controller/rpc/conn.go b/lib/controller/rpc/conn.go
index 0e532f23c0..c6be679a25 100644
--- a/lib/controller/rpc/conn.go
+++ b/lib/controller/rpc/conn.go
@@ -16,9 +16,11 @@ import (
"io/ioutil"
"net"
"net/http"
+ "net/http/httputil"
"net/url"
"strconv"
"strings"
+ "sync"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -43,10 +45,13 @@ type Conn struct {
SendHeader http.Header
RedactHostInErrors bool
- clusterID string
- httpClient http.Client
- baseURL url.URL
- tokenProvider TokenProvider
+ clusterID string
+ httpClient http.Client
+ baseURL url.URL
+ tokenProvider TokenProvider
+ discoveryDocument *arvados.DiscoveryDocument
+ discoveryDocumentMtx sync.Mutex
+ discoveryDocumentExpires time.Time
}
func NewConn(clusterID string, url *url.URL, insecure bool, tp TokenProvider) *Conn {
@@ -88,6 +93,8 @@ func (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arva
Scheme: conn.baseURL.Scheme,
APIHost: conn.baseURL.Host,
SendHeader: conn.SendHeader,
+ // Disable auto-retry
+ Timeout: 0,
}
tokens, err := conn.tokenProvider(ctx)
if err != nil {
@@ -143,10 +150,13 @@ func (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arva
}
if len(tokens) > 1 {
+ if params == nil {
+ params = make(map[string]interface{})
+ }
params["reader_tokens"] = tokens[1:]
}
path := ep.Path
- if strings.Contains(ep.Path, "/{uuid}") {
+ if strings.Contains(ep.Path, "/{uuid}") && params != nil {
uuid, _ := params["uuid"].(string)
path = strings.Replace(path, "/{uuid}", "/"+uuid, 1)
delete(params, "uuid")
@@ -186,6 +196,22 @@ func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error)
return resp, err
}
+func (conn *Conn) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {
+ conn.discoveryDocumentMtx.Lock()
+ defer conn.discoveryDocumentMtx.Unlock()
+ if conn.discoveryDocument != nil && time.Now().Before(conn.discoveryDocumentExpires) {
+ return *conn.discoveryDocument, nil
+ }
+ var dd arvados.DiscoveryDocument
+ err := conn.requestAndDecode(ctx, &dd, arvados.EndpointDiscoveryDocument, nil, nil)
+ if err != nil {
+ return dd, err
+ }
+ conn.discoveryDocument = &dd
+ conn.discoveryDocumentExpires = time.Now().Add(time.Hour)
+ return *conn.discoveryDocument, nil
+}
+
func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
ep := arvados.EndpointLogin
var resp arvados.LoginResponse
@@ -217,6 +243,41 @@ func (conn *Conn) relativeToBaseURL(location string) string {
return location
}
+func (conn *Conn) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {
+ ep := arvados.EndpointAuthorizedKeyCreate
+ var resp arvados.AuthorizedKey
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {
+ ep := arvados.EndpointAuthorizedKeyUpdate
+ var resp arvados.AuthorizedKey
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {
+ ep := arvados.EndpointAuthorizedKeyGet
+ var resp arvados.AuthorizedKey
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {
+ ep := arvados.EndpointAuthorizedKeyList
+ var resp arvados.AuthorizedKeyList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {
+ ep := arvados.EndpointAuthorizedKeyDelete
+ var resp arvados.AuthorizedKey
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
func (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
ep := arvados.EndpointCollectionCreate
var resp arvados.Collection
@@ -294,6 +355,13 @@ func (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOpt
return resp, err
}
+func (conn *Conn) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ ep := arvados.EndpointContainerPriorityUpdate
+ var resp arvados.Container
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
func (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
ep := arvados.EndpointContainerGet
var resp arvados.Container
@@ -333,7 +401,7 @@ func (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOption
// a running container. If the returned error is nil, the caller is
// responsible for closing sshconn.Conn.
func (conn *Conn) ContainerSSH(ctx context.Context, options arvados.ContainerSSHOptions) (sshconn arvados.ConnectionResponse, err error) {
- u, err := conn.baseURL.Parse("/" + strings.Replace(arvados.EndpointContainerSSH.Path, "{uuid}", options.UUID, -1))
+ u, err := conn.baseURL.Parse("/" + strings.Replace(arvados.EndpointContainerSSHCompat.Path, "{uuid}", options.UUID, -1))
if err != nil {
err = fmt.Errorf("url.Parse: %w", err)
return
@@ -349,7 +417,7 @@ func (conn *Conn) ContainerSSH(ctx context.Context, options arvados.ContainerSSH
// the controller. The caller should connect the returned resp.Conn to
// a client-side yamux session.
func (conn *Conn) ContainerGatewayTunnel(ctx context.Context, options arvados.ContainerGatewayTunnelOptions) (tunnelconn arvados.ConnectionResponse, err error) {
- u, err := conn.baseURL.Parse("/" + strings.Replace(arvados.EndpointContainerGatewayTunnel.Path, "{uuid}", options.UUID, -1))
+ u, err := conn.baseURL.Parse("/" + strings.Replace(arvados.EndpointContainerGatewayTunnelCompat.Path, "{uuid}", options.UUID, -1))
if err != nil {
err = fmt.Errorf("url.Parse: %w", err)
return
@@ -414,11 +482,11 @@ func (conn *Conn) socket(ctx context.Context, u *url.URL, upgradeHeader string,
} else {
message = fmt.Sprintf("%q", body)
}
- return connresp, fmt.Errorf("server did not provide a tunnel: %s: %s", resp.Status, message)
+ return connresp, httpserver.ErrorWithStatus(fmt.Errorf("server did not provide a tunnel: %s: %s", resp.Status, message), resp.StatusCode)
}
if strings.ToLower(resp.Header.Get("Upgrade")) != upgradeHeader ||
strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
- return connresp, fmt.Errorf("bad response from server: Upgrade %q Connection %q", resp.Header.Get("Upgrade"), resp.Header.Get("Connection"))
+ return connresp, httpserver.ErrorWithStatus(fmt.Errorf("bad response from server: Upgrade %q Connection %q", resp.Header.Get("Upgrade"), resp.Header.Get("Connection")), http.StatusBadGateway)
}
connresp.Conn = netconn
connresp.Bufrw = &bufio.ReadWriter{Reader: bufr, Writer: bufw}
@@ -461,6 +529,26 @@ func (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.De
return resp, err
}
+func (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {
+ ep := arvados.EndpointContainerRequestContainerStatus
+ var resp arvados.ContainerStatus
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (resp http.Handler, err error) {
+ proxy := &httputil.ReverseProxy{
+ Transport: conn.httpClient.Transport,
+ Director: func(r *http.Request) {
+ u := conn.baseURL
+ u.Path = r.URL.Path
+ u.RawQuery = fmt.Sprintf("no_forward=%v", options.NoForward)
+ r.URL = &u
+ },
+ }
+ return proxy, nil
+}
+
func (conn *Conn) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {
ep := arvados.EndpointGroupCreate
var resp arvados.Group
@@ -559,6 +647,41 @@ func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions)
return resp, err
}
+func (conn *Conn) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {
+ ep := arvados.EndpointLogCreate
+ var resp arvados.Log
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {
+ ep := arvados.EndpointLogUpdate
+ var resp arvados.Log
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {
+ ep := arvados.EndpointLogGet
+ var resp arvados.Log
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {
+ ep := arvados.EndpointLogList
+ var resp arvados.LogList
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
+func (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {
+ ep := arvados.EndpointLogDelete
+ var resp arvados.Log
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
+
func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
ep := arvados.EndpointSpecimenCreate
var resp arvados.Specimen
diff --git a/lib/controller/rpc/conn_test.go b/lib/controller/rpc/conn_test.go
index eee8db9ac8..0d1200fe12 100644
--- a/lib/controller/rpc/conn_test.go
+++ b/lib/controller/rpc/conn_test.go
@@ -10,6 +10,7 @@ import (
"os"
"testing"
+ "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
@@ -39,13 +40,26 @@ type RPCSuite struct {
func (s *RPCSuite) SetUpTest(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
s.ctx = context.WithValue(ctx, contextKeyTestTokens, []string{arvadostest.ActiveToken})
- s.conn = NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_TEST_API_HOST")}, true, func(ctx context.Context) ([]string, error) {
+}
+
+func (s *RPCSuite) setupConn(c *check.C, host string) {
+ s.conn = NewConn("zzzzz", &url.URL{Scheme: "https", Host: host}, true, func(ctx context.Context) ([]string, error) {
tokens, _ := ctx.Value(contextKeyTestTokens).([]string)
return tokens, nil
})
}
-func (s *RPCSuite) TestLogin(c *check.C) {
+func (s *RPCSuite) workbench2URL(c *check.C) string {
+ loader := config.NewLoader(nil, s.log)
+ cfg, err := loader.Load()
+ c.Assert(err, check.IsNil)
+ cluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ return cluster.Services.Workbench2.ExternalURL.String()
+}
+
+func (s *RPCSuite) TestRailsLogin404(c *check.C) {
+ s.setupConn(c, os.Getenv("ARVADOS_TEST_API_HOST"))
s.ctx = context.Background()
opts := arvados.LoginOptions{
ReturnTo: "https://foo.example.com/bar",
@@ -54,17 +68,30 @@ func (s *RPCSuite) TestLogin(c *check.C) {
c.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 404)
}
-func (s *RPCSuite) TestLogout(c *check.C) {
+func (s *RPCSuite) TestRailsLogout404(c *check.C) {
+ s.setupConn(c, os.Getenv("ARVADOS_TEST_API_HOST"))
s.ctx = context.Background()
opts := arvados.LogoutOptions{
ReturnTo: "https://foo.example.com/bar",
}
+ _, err := s.conn.Logout(s.ctx, opts)
+ c.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 404)
+}
+
+func (s *RPCSuite) TestControllerLogout(c *check.C) {
+ s.setupConn(c, os.Getenv("ARVADOS_API_HOST"))
+ s.ctx = context.Background()
+ url := s.workbench2URL(c)
+ opts := arvados.LogoutOptions{
+ ReturnTo: url,
+ }
resp, err := s.conn.Logout(s.ctx, opts)
c.Check(err, check.IsNil)
- c.Check(resp.RedirectLocation, check.Equals, opts.ReturnTo)
+ c.Check(resp.RedirectLocation, check.Equals, url)
}
func (s *RPCSuite) TestCollectionCreate(c *check.C) {
+ s.setupConn(c, os.Getenv("ARVADOS_TEST_API_HOST"))
coll, err := s.conn.CollectionCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
@@ -74,6 +101,7 @@ func (s *RPCSuite) TestCollectionCreate(c *check.C) {
}
func (s *RPCSuite) TestSpecimenCRUD(c *check.C) {
+ s.setupConn(c, os.Getenv("ARVADOS_TEST_API_HOST"))
sp, err := s.conn.SpecimenCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
"owner_uuid": arvadostest.ActiveUserUUID,
"properties": map[string]string{"foo": "bar"},
diff --git a/lib/controller/trash.go b/lib/controller/trash.go
index 551b2f92bb..99e7aec0b6 100644
--- a/lib/controller/trash.go
+++ b/lib/controller/trash.go
@@ -5,6 +5,7 @@
package controller
import (
+ "context"
"time"
"git.arvados.org/arvados.git/lib/controller/dblock"
@@ -12,22 +13,62 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
)
-func (h *Handler) trashSweepWorker() {
- sleep := h.Cluster.Collections.TrashSweepInterval.Duration()
- logger := ctxlog.FromContext(h.BackgroundContext).WithField("worker", "trash sweep")
+func (h *Handler) periodicWorker(workerName string, interval time.Duration, locker *dblock.DBLocker, run func(context.Context) error) {
+ logger := ctxlog.FromContext(h.BackgroundContext).WithField("worker", workerName)
ctx := ctxlog.Context(h.BackgroundContext, logger)
- if sleep <= 0 {
- logger.Debugf("Collections.TrashSweepInterval is %v, not running worker", sleep)
+ if interval <= 0 {
+ logger.Debugf("interval is %v, not running worker", interval)
return
}
- dblock.TrashSweep.Lock(ctx, h.db)
- defer dblock.TrashSweep.Unlock()
- for time.Sleep(sleep); ctx.Err() == nil; time.Sleep(sleep) {
- dblock.TrashSweep.Check()
- ctx := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{h.Cluster.SystemRootToken}})
- _, err := h.federation.SysTrashSweep(ctx, struct{}{})
+ if !locker.Lock(ctx, h.dbConnector.GetDB) {
+ // context canceled
+ return
+ }
+ defer locker.Unlock()
+ for time.Sleep(interval); ctx.Err() == nil; time.Sleep(interval) {
+ if !locker.Check() {
+ // context canceled
+ return
+ }
+ err := run(ctx)
if err != nil {
- logger.WithError(err).Info("trash sweep failed")
+ logger.WithError(err).Infof("%s failed", workerName)
}
}
}
+
+func (h *Handler) trashSweepWorker() {
+ h.periodicWorker("trash sweep", h.Cluster.Collections.TrashSweepInterval.Duration(), dblock.TrashSweep, func(ctx context.Context) error {
+ ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{h.Cluster.SystemRootToken}})
+ _, err := h.federation.SysTrashSweep(ctx, struct{}{})
+ return err
+ })
+}
+
+func (h *Handler) containerLogSweepWorker() {
+ h.periodicWorker("container log sweep", h.Cluster.Containers.Logging.SweepInterval.Duration(), dblock.ContainerLogSweep, func(ctx context.Context) error {
+ db, err := h.dbConnector.GetDB(ctx)
+ if err != nil {
+ return err
+ }
+ res, err := db.ExecContext(ctx, `
+DELETE FROM logs
+ USING containers
+ WHERE logs.object_uuid=containers.uuid
+ AND logs.event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat', 'hoststat', 'node', 'container', 'keepstore')
+ AND containers.log IS NOT NULL
+ AND now() - containers.finished_at > $1::interval`,
+ h.Cluster.Containers.Logging.MaxAge.String())
+ if err != nil {
+ return err
+ }
+ logger := ctxlog.FromContext(ctx)
+ rows, err := res.RowsAffected()
+ if err != nil {
+ logger.WithError(err).Warn("unexpected error from RowsAffected()")
+ } else {
+ logger.WithField("rows", rows).Info("deleted rows from logs table")
+ }
+ return nil
+ })
+}
diff --git a/lib/costanalyzer/costanalyzer.go b/lib/costanalyzer/costanalyzer.go
index a3673c9794..e68e2cb8c1 100644
--- a/lib/costanalyzer/costanalyzer.go
+++ b/lib/costanalyzer/costanalyzer.go
@@ -26,6 +26,8 @@ import (
const timestampFormat = "2006-01-02T15:04:05"
+var pagesize = 1000
+
type nodeInfo struct {
// Legacy (records created by Arvados Node Manager with Arvados <= 1.4.3)
Properties struct {
@@ -355,6 +357,35 @@ func getNode(arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclien
return
}
+func getContainerRequests(ac *arvados.Client, filters []arvados.Filter) ([]arvados.ContainerRequest, error) {
+ var allItems []arvados.ContainerRequest
+ for {
+ pagefilters := append([]arvados.Filter(nil), filters...)
+ if len(allItems) > 0 {
+ pagefilters = append(pagefilters, arvados.Filter{
+ Attr: "uuid",
+ Operator: ">",
+ Operand: allItems[len(allItems)-1].UUID,
+ })
+ }
+ var resp arvados.ContainerRequestList
+ err := ac.RequestAndDecode(&resp, "GET", "arvados/v1/container_requests", nil, arvados.ResourceListParams{
+ Filters: pagefilters,
+ Limit: &pagesize,
+ Order: "uuid",
+ Count: "none",
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error querying container_requests: %w", err)
+ }
+ if len(resp.Items) == 0 {
+ // no more pages
+ return allItems, nil
+ }
+ allItems = append(allItems, resp.Items...)
+ }
+}
+
func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, resultsDir string, cache bool) (cost map[string]consumption, err error) {
cost = make(map[string]consumption)
@@ -363,9 +394,7 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
if err != nil {
return nil, fmt.Errorf("error loading object %s: %s", uuid, err.Error())
}
-
- var childCrs map[string]interface{}
- filterset := []arvados.Filter{
+ allItems, err := getContainerRequests(ac, []arvados.Filter{
{
Attr: "owner_uuid",
Operator: "=",
@@ -376,29 +405,23 @@ func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvado
Operator: "=",
Operand: nil,
},
- }
- err = ac.RequestAndDecode(&childCrs, "GET", "arvados/v1/container_requests", nil, map[string]interface{}{
- "filters": filterset,
- "limit": 10000,
})
if err != nil {
return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
}
- if value, ok := childCrs["items"]; ok {
- logger.Infof("Collecting top level container requests in project %s", uuid)
- items := value.([]interface{})
- for _, item := range items {
- itemMap := item.(map[string]interface{})
- crInfo, err := generateCrInfo(logger, itemMap["uuid"].(string), arv, ac, kc, resultsDir, cache)
- if err != nil {
- return nil, fmt.Errorf("error generating container_request CSV: %s", err.Error())
- }
- for k, v := range crInfo {
- cost[k] = v
- }
- }
- } else {
+ if len(allItems) == 0 {
logger.Infof("No top level container requests found in project %s", uuid)
+ return
+ }
+ logger.Infof("Collecting top level container requests in project %s", uuid)
+ for _, cr := range allItems {
+ crInfo, err := generateCrInfo(logger, cr.UUID, arv, ac, kc, resultsDir, cache)
+ if err != nil {
+ return nil, fmt.Errorf("error generating container_request CSV for %s: %s", cr.UUID, err)
+ }
+ for k, v := range crInfo {
+ cost[k] = v
+ }
}
return
}
@@ -456,28 +479,20 @@ func generateCrInfo(logger *logrus.Logger, uuid string, arv *arvadosclient.Arvad
csv += tmpCsv
cost[container.UUID] = total
- // Find all container requests that have the container we found above as requesting_container_uuid
- var childCrs arvados.ContainerRequestList
- filterset := []arvados.Filter{
- {
- Attr: "requesting_container_uuid",
- Operator: "=",
- Operand: container.UUID,
- }}
- err = ac.RequestAndDecode(&childCrs, "GET", "arvados/v1/container_requests", nil, map[string]interface{}{
- "filters": filterset,
- "limit": 10000,
- })
- if err != nil {
- return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
- }
- logger.Infof("Collecting child containers for container request %s (%s)", crUUID, container.FinishedAt)
+ // Find all container requests that have the container we
+ // found above as requesting_container_uuid.
+ allItems, err := getContainerRequests(ac, []arvados.Filter{{
+ Attr: "requesting_container_uuid",
+ Operator: "=",
+ Operand: container.UUID,
+ }})
+ logger.Infof("Looking up %d child containers for container %s (%s)", len(allItems), container.UUID, container.FinishedAt)
progressTicker := time.NewTicker(5 * time.Second)
defer progressTicker.Stop()
- for i, cr2 := range childCrs.Items {
+ for i, cr2 := range allItems {
select {
case <-progressTicker.C:
- logger.Infof("... %d of %d", i+1, len(childCrs.Items))
+ logger.Infof("... %d of %d", i+1, len(allItems))
default:
}
node, err := getNode(arv, ac, kc, cr2)
diff --git a/lib/costanalyzer/costanalyzer_test.go b/lib/costanalyzer/costanalyzer_test.go
index b78b288ab0..1054870add 100644
--- a/lib/costanalyzer/costanalyzer_test.go
+++ b/lib/costanalyzer/costanalyzer_test.go
@@ -35,6 +35,10 @@ func (s *Suite) TearDownSuite(c *check.C) {
func (s *Suite) SetUpSuite(c *check.C) {
arvadostest.StartKeep(2, true)
+ // Use a small page size to exercise paging without adding
+ // lots of fixtures
+ pagesize = 2
+
// Get the various arvados, arvadosclient, and keep client objects
ac := arvados.NewClientFromEnv()
arv, err := arvadosclient.MakeArvadosClient()
diff --git a/lib/crunchrun/background.go b/lib/crunchrun/background.go
index 8a919bc5e2..adb65324b8 100644
--- a/lib/crunchrun/background.go
+++ b/lib/crunchrun/background.go
@@ -21,6 +21,7 @@ var (
lockprefix = "crunch-run-"
locksuffix = ".lock"
brokenfile = "crunch-run-broken"
+ pricesfile = "crunch-run-prices.json"
)
// procinfo is saved in each process's lockfile.
@@ -183,7 +184,20 @@ func kill(uuid string, signal syscall.Signal, stdout, stderr io.Writer) error {
}
// ListProcesses lists UUIDs of active crunch-run processes.
-func ListProcesses(stdout, stderr io.Writer) int {
+func ListProcesses(stdin io.Reader, stdout, stderr io.Writer) int {
+ if buf, err := io.ReadAll(stdin); err == nil && len(buf) > 0 {
+ // write latest pricing data to disk where
+ // current/future crunch-run processes can load it
+ fnm := filepath.Join(lockdir, pricesfile)
+ fnmtmp := fmt.Sprintf("%s~%d", fnm, os.Getpid())
+ err := os.WriteFile(fnmtmp, buf, 0777)
+ if err != nil {
+ fmt.Fprintf(stderr, "error writing price data to %s: %s", fnmtmp, err)
+ } else if err = os.Rename(fnmtmp, fnm); err != nil {
+ fmt.Fprintf(stderr, "error renaming %s to %s: %s", fnmtmp, fnm, err)
+ os.Remove(fnmtmp)
+ }
+ }
// filepath.Walk does not follow symlinks, so we must walk
// lockdir+"/." in case lockdir itself is a symlink.
walkdir := lockdir + "/."
@@ -245,7 +259,7 @@ func ListProcesses(stdout, stderr io.Writer) int {
fmt.Fprintf(stderr, "%s: find process %d: %s", path, pi.PID, err)
return nil
}
- err = proc.Signal(syscall.Signal(0))
+ err = proc.Signal(syscall.SIGUSR2)
if err != nil {
// Process is dead, even though lockfile was
// still locked. Most likely a stuck arv-mount
diff --git a/lib/crunchrun/cgroup.go b/lib/crunchrun/cgroup.go
index 48ec93b876..a722e5f142 100644
--- a/lib/crunchrun/cgroup.go
+++ b/lib/crunchrun/cgroup.go
@@ -7,13 +7,16 @@ package crunchrun
import (
"bytes"
"fmt"
- "io/ioutil"
+ "io/fs"
)
// Return the current process's cgroup for the given subsystem.
-func findCgroup(subsystem string) (string, error) {
+//
+// If the host has cgroups v2 and not v1 (i.e., unified mode), return
+// the current process's cgroup.
+func findCgroup(fsys fs.FS, subsystem string) (string, error) {
subsys := []byte(subsystem)
- cgroups, err := ioutil.ReadFile("/proc/self/cgroup")
+ cgroups, err := fs.ReadFile(fsys, "proc/self/cgroup")
if err != nil {
return "", err
}
@@ -22,7 +25,20 @@ func findCgroup(subsystem string) (string, error) {
if len(toks) < 3 {
continue
}
+ if len(toks[1]) == 0 && string(toks[0]) == "0" {
+ // cgroups v2: "0::$PATH"
+ //
+ // In "hybrid" mode, this entry is last, so we
+ // use it when the specified subsystem doesn't
+ // match a cgroups v1 entry.
+ //
+ // In "unified" mode, this is the only entry,
+ // so we use it regardless of which subsystem
+ // was specified.
+ return string(toks[2]), nil
+ }
for _, s := range bytes.Split(toks[1], []byte(",")) {
+ // cgroups v1: "7:cpu,cpuacct:/user.slice"
if bytes.Compare(s, subsys) == 0 {
return string(toks[2]), nil
}
diff --git a/lib/crunchrun/cgroup_test.go b/lib/crunchrun/cgroup_test.go
index eb87456d14..a1acb6fb92 100644
--- a/lib/crunchrun/cgroup_test.go
+++ b/lib/crunchrun/cgroup_test.go
@@ -5,6 +5,11 @@
package crunchrun
import (
+ "bytes"
+ "os"
+ "os/exec"
+ "strings"
+
. "gopkg.in/check.v1"
)
@@ -13,11 +18,57 @@ type CgroupSuite struct{}
var _ = Suite(&CgroupSuite{})
func (s *CgroupSuite) TestFindCgroup(c *C) {
- for _, s := range []string{"devices", "cpu", "cpuset"} {
- g, err := findCgroup(s)
- if c.Check(err, IsNil) {
- c.Check(g, Not(Equals), "", Commentf("subsys %q", s))
+ var testfiles []string
+ buf, err := exec.Command("find", "../crunchstat/testdata", "-name", "cgroup", "-type", "f").Output()
+ c.Assert(err, IsNil)
+ for _, testfile := range bytes.Split(buf, []byte{'\n'}) {
+ if len(testfile) > 0 {
+ testfiles = append(testfiles, string(testfile))
+ }
+ }
+ testfiles = append(testfiles, "/proc/self/cgroup")
+
+ tmpdir := c.MkDir()
+ err = os.MkdirAll(tmpdir+"/proc/self", 0777)
+ c.Assert(err, IsNil)
+ fsys := os.DirFS(tmpdir)
+
+ for _, trial := range []struct {
+ match string // if non-empty, only check testfiles containing this string
+ subsys string
+ expect string // empty means "any" (we never actually expect empty string)
+ }{
+ {"debian11", "blkio", "/user.slice/user-1000.slice/session-5424.scope"},
+ {"debian12", "cpuacct", "/user.slice/user-1000.slice/session-4.scope"},
+ {"debian12", "bogus-does-not-matter", "/user.slice/user-1000.slice/session-4.scope"},
+ {"ubuntu1804", "blkio", "/user.slice"},
+ {"ubuntu1804", "cpuacct", "/user.slice"},
+ {"", "cpu", ""},
+ {"", "cpuset", ""},
+ {"", "devices", ""},
+ {"", "bogus-does-not-matter", ""},
+ } {
+ for _, testfile := range testfiles {
+ if !strings.Contains(testfile, trial.match) {
+ continue
+ }
+ c.Logf("trial %+v testfile %s", trial, testfile)
+
+ // Copy cgroup file into our fake proc/self/ dir
+ buf, err := os.ReadFile(testfile)
+ c.Assert(err, IsNil)
+ err = os.WriteFile(tmpdir+"/proc/self/cgroup", buf, 0777)
+ c.Assert(err, IsNil)
+
+ cgroup, err := findCgroup(fsys, trial.subsys)
+ if !c.Check(err, IsNil) {
+ continue
+ }
+ c.Logf("\tcgroup = %q", cgroup)
+ c.Check(cgroup, Not(Equals), "")
+ if trial.expect != "" {
+ c.Check(cgroup, Equals, trial.expect)
+ }
}
- c.Logf("cgroup(%q) == %q", s, g)
}
}
diff --git a/lib/crunchrun/container_gateway.go b/lib/crunchrun/container_gateway.go
index 3cb93fc746..5b68e2c50e 100644
--- a/lib/crunchrun/container_gateway.go
+++ b/lib/crunchrun/container_gateway.go
@@ -5,6 +5,7 @@
package crunchrun
import (
+ "context"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
@@ -17,12 +18,14 @@ import (
"net/url"
"os"
"os/exec"
+ "strings"
"sync"
"syscall"
"time"
"git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/lib/selfsigned"
+ "git.arvados.org/arvados.git/lib/webdavfs"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
@@ -31,7 +34,7 @@ import (
"github.com/google/shlex"
"github.com/hashicorp/yamux"
"golang.org/x/crypto/ssh"
- "golang.org/x/net/context"
+ "golang.org/x/net/webdav"
)
type GatewayTarget interface {
@@ -78,6 +81,10 @@ type Gateway struct {
// controller process at the other end of the tunnel.
UpdateTunnelURL func(url string)
+ // Source for serving WebDAV requests with
+ // X-Webdav-Source: /log
+ LogCollection arvados.CollectionFileSystem
+
sshConfig ssh.ServerConfig
requestAuth string
respondAuth string
@@ -157,7 +164,7 @@ func (gw *Gateway) Start() error {
srv := &httpserver.Server{
Server: http.Server{
- Handler: http.HandlerFunc(gw.handleSSH),
+ Handler: gw,
TLSConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
},
@@ -213,7 +220,7 @@ func (gw *Gateway) runTunnel(addr string) error {
AuthSecret: gw.AuthSecret,
})
if err != nil {
- return fmt.Errorf("error creating gateway tunnel: %s", err)
+ return fmt.Errorf("error creating gateway tunnel: %w", err)
}
mux, err := yamux.Client(tun.Conn, nil)
if err != nil {
@@ -260,6 +267,75 @@ func (gw *Gateway) runTunnel(addr string) error {
}
}
+var webdavMethod = map[string]bool{
+ "GET": true,
+ "OPTIONS": true,
+ "PROPFIND": true,
+}
+
+func (gw *Gateway) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Vary", "X-Arvados-Authorization, X-Arvados-Container-Gateway-Uuid, X-Webdav-Prefix, X-Webdav-Source")
+ reqUUID := req.Header.Get("X-Arvados-Container-Gateway-Uuid")
+ if reqUUID == "" {
+ // older controller versions only send UUID as query param
+ req.ParseForm()
+ reqUUID = req.Form.Get("uuid")
+ }
+ if reqUUID != gw.ContainerUUID {
+ http.Error(w, fmt.Sprintf("misdirected request: meant for %q but received by crunch-run %q", reqUUID, gw.ContainerUUID), http.StatusBadGateway)
+ return
+ }
+ if req.Header.Get("X-Arvados-Authorization") != gw.requestAuth {
+ http.Error(w, "bad X-Arvados-Authorization header", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("X-Arvados-Authorization-Response", gw.respondAuth)
+ switch {
+ case req.Method == "POST" && req.Header.Get("Upgrade") == "ssh":
+ gw.handleSSH(w, req)
+ case req.Header.Get("X-Webdav-Source") == "/log":
+ if !webdavMethod[req.Method] {
+ http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ gw.handleLogsWebDAV(w, req)
+ default:
+ http.Error(w, "path not found", http.StatusNotFound)
+ }
+}
+
+func (gw *Gateway) handleLogsWebDAV(w http.ResponseWriter, r *http.Request) {
+ prefix := r.Header.Get("X-Webdav-Prefix")
+ if !strings.HasPrefix(r.URL.Path, prefix) {
+ http.Error(w, "X-Webdav-Prefix header is not a prefix of the requested path", http.StatusBadRequest)
+ return
+ }
+ if gw.LogCollection == nil {
+ http.Error(w, "Not found", http.StatusNotFound)
+ return
+ }
+ wh := webdav.Handler{
+ Prefix: prefix,
+ FileSystem: &webdavfs.FS{
+ FileSystem: gw.LogCollection,
+ Prefix: "",
+ Writing: false,
+ AlwaysReadEOF: r.Method == "PROPFIND",
+ },
+ LockSystem: webdavfs.NoLockSystem,
+ Logger: gw.webdavLogger,
+ }
+ wh.ServeHTTP(w, r)
+}
+
+func (gw *Gateway) webdavLogger(r *http.Request, err error) {
+ if err != nil && !os.IsNotExist(err) {
+ ctxlog.FromContext(r.Context()).WithError(err).Info("error reported by webdav handler")
+ } else {
+ ctxlog.FromContext(r.Context()).WithError(err).Debug("webdav request log")
+ }
+}
+
// handleSSH connects to an SSH server that allows the caller to run
// interactive commands as root (or any other desired user) inside the
// container. The tunnel itself can only be created by an
@@ -282,22 +358,7 @@ func (gw *Gateway) runTunnel(addr string) error {
// X-Arvados-Login-Username: argument to "docker exec --user": account
// used to run command(s) inside the container.
func (gw *Gateway) handleSSH(w http.ResponseWriter, req *http.Request) {
- // In future we'll handle browser traffic too, but for now the
- // only traffic we expect is an SSH tunnel from
- // (*lib/controller/localdb.Conn)ContainerSSH()
- if req.Method != "POST" || req.Header.Get("Upgrade") != "ssh" {
- http.Error(w, "path not found", http.StatusNotFound)
- return
- }
req.ParseForm()
- if want := req.Form.Get("uuid"); want != gw.ContainerUUID {
- http.Error(w, fmt.Sprintf("misdirected request: meant for %q but received by crunch-run %q", want, gw.ContainerUUID), http.StatusBadGateway)
- return
- }
- if req.Header.Get("X-Arvados-Authorization") != gw.requestAuth {
- http.Error(w, "bad X-Arvados-Authorization header", http.StatusUnauthorized)
- return
- }
detachKeys := req.Form.Get("detach_keys")
username := req.Form.Get("login_username")
if username == "" {
@@ -316,7 +377,6 @@ func (gw *Gateway) handleSSH(w http.ResponseWriter, req *http.Request) {
defer netconn.Close()
w.Header().Set("Connection", "upgrade")
w.Header().Set("Upgrade", "ssh")
- w.Header().Set("X-Arvados-Authorization-Response", gw.respondAuth)
netconn.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n"))
w.Header().Write(netconn)
netconn.Write([]byte("\r\n"))
diff --git a/lib/crunchrun/copier.go b/lib/crunchrun/copier.go
index 72c714dfa4..a081c5d325 100644
--- a/lib/crunchrun/copier.go
+++ b/lib/crunchrun/copier.go
@@ -51,7 +51,6 @@ type filetodo struct {
// manifest, err := (&copier{...}).Copy()
type copier struct {
client *arvados.Client
- arvClient IArvadosClient
keepClient IKeepClient
hostOutputDir string
ctrOutputDir string
@@ -109,7 +108,7 @@ func (cp *copier) Copy() (string, error) {
}
func (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) (int64, error) {
- cp.logger.Printf("copying %q (%d bytes)", f.dst, f.size)
+ cp.logger.Printf("copying %q (%d bytes)", strings.TrimLeft(f.dst, "/"), f.size)
dst, err := fs.OpenFile(f.dst, os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return 0, err
@@ -162,6 +161,20 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
// copy, relative to its mount point -- ".", "./foo.txt", ...
srcRelPath := filepath.Join(".", srcMount.Path, src[len(srcRoot):])
+ // outputRelPath is the path, relative to the output directory,
+ // that corresponds to the path in the output collection where
+ // the file will go; it is used only for logging
+ var outputRelPath = ""
+ if strings.HasPrefix(src, cp.ctrOutputDir) {
+ outputRelPath = strings.TrimPrefix(src[len(cp.ctrOutputDir):], "/")
+ }
+ if outputRelPath == "" {
+ // blank means copy a whole directory, so replace it
+ // with a wildcard to make it a little clearer what's
+ // going on, since outputRelPath is only used for logging
+ outputRelPath = "*"
+ }
+
switch {
case srcMount.ExcludeFromOutput:
case srcMount.Kind == "tmp":
@@ -170,12 +183,14 @@ func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow b
case srcMount.Kind != "collection":
return fmt.Errorf("%q: unsupported mount %q in output (kind is %q)", src, srcRoot, srcMount.Kind)
case !srcMount.Writable:
+ cp.logger.Printf("copying %q from %v/%v", outputRelPath, srcMount.PortableDataHash, strings.TrimPrefix(srcRelPath, "./"))
mft, err := cp.getManifest(srcMount.PortableDataHash)
if err != nil {
return err
}
cp.manifest += mft.Extract(srcRelPath, dest).Text
default:
+ cp.logger.Printf("copying %q", outputRelPath)
hostRoot, err := cp.hostRoot(srcRoot)
if err != nil {
return err
@@ -356,7 +371,7 @@ func (cp *copier) getManifest(pdh string) (*manifest.Manifest, error) {
return mft, nil
}
var coll arvados.Collection
- err := cp.arvClient.Get("collections", pdh, nil, &coll)
+ err := cp.client.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+pdh, nil, nil)
if err != nil {
return nil, fmt.Errorf("error retrieving collection record for %q: %s", pdh, err)
}
diff --git a/lib/crunchrun/copier_test.go b/lib/crunchrun/copier_test.go
index 5e92490163..c8936d1a9f 100644
--- a/lib/crunchrun/copier_test.go
+++ b/lib/crunchrun/copier_test.go
@@ -12,7 +12,6 @@ import (
"syscall"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
@@ -27,12 +26,9 @@ type copierSuite struct {
func (s *copierSuite) SetUpTest(c *check.C) {
tmpdir := c.MkDir()
- api, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, check.IsNil)
s.log = bytes.Buffer{}
s.cp = copier{
client: arvados.NewClientFromEnv(),
- arvClient: api,
hostOutputDir: tmpdir,
ctrOutputDir: "/ctr/outdir",
mounts: map[string]arvados.Mount{
diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index eadf22876f..556a3bfe13 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -12,6 +12,7 @@ import (
"flag"
"fmt"
"io"
+ "io/fs"
"io/ioutil"
"log"
"net"
@@ -31,6 +32,7 @@ import (
"syscall"
"time"
+ "git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/crunchstat"
@@ -44,14 +46,17 @@ import (
type command struct{}
+var arvadosCertPath = "/etc/arvados/ca-certificates.crt"
+
var Command = command{}
// ConfigData contains environment variables and (when needed) cluster
// configuration, passed from dispatchcloud to crunch-run on stdin.
type ConfigData struct {
- Env map[string]string
- KeepBuffers int
- Cluster *arvados.Cluster
+ Env map[string]string
+ KeepBuffers int
+ EC2SpotCheck bool
+ Cluster *arvados.Cluster
}
// IArvadosClient is the minimal Arvados API methods used by crunch-run.
@@ -73,7 +78,6 @@ type IKeepClient interface {
ReadAt(locator string, p []byte, off int) (int, error)
ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
LocalLocator(locator string) (string, error)
- ClearBlockCache()
SetStorageClasses(sc []string)
}
@@ -140,7 +144,9 @@ type ContainerRunner struct {
MkArvClient func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
finalState string
parentTemp string
+ costStartTime time.Time
+ keepstore *exec.Cmd
keepstoreLogger io.WriteCloser
keepstoreLogbuf *bufThenWrite
statLogger io.WriteCloser
@@ -148,20 +154,12 @@ type ContainerRunner struct {
hoststatLogger io.WriteCloser
hoststatReporter *crunchstat.Reporter
statInterval time.Duration
- cgroupRoot string
- // What we expect the container's cgroup parent to be.
- expectCgroupParent string
// What we tell docker to use as the container's cgroup
- // parent. Note: Ideally we would use the same field for both
- // expectCgroupParent and setCgroupParent, and just make it
- // default to "docker". However, when using docker < 1.10 with
- // systemd, specifying a non-empty cgroup parent (even the
- // default value "docker") hits a docker bug
- // (https://github.com/docker/docker/issues/17126). Using two
- // separate fields makes it possible to use the "expect cgroup
- // parent to be X" feature even on sites where the "specify
- // cgroup parent" feature breaks.
+ // parent.
setCgroupParent string
+ // Fake root dir where crunchstat.Reporter should read OS
+ // files, for testing.
+ crunchstatFakeFS fs.FS
cStateLock sync.Mutex
cCancelled bool // StopContainer() invoked
@@ -175,6 +173,9 @@ type ContainerRunner struct {
containerWatchdogInterval time.Duration
gateway Gateway
+
+ prices []cloud.InstancePrice
+ pricesLock sync.Mutex
}
// setupSignals sets up signal handling to gracefully terminate the
@@ -313,7 +314,12 @@ func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *e
"Block not found error",
"Unhandled exception during FUSE operation",
},
- ReportFunc: runner.reportArvMountWarning,
+ ReportFunc: func(pattern, text string) {
+ runner.updateRuntimeStatus(arvadosclient.Dict{
+ "warning": "arv-mount: " + pattern,
+ "warningDetail": text,
+ })
+ },
}
c.Stdout = runner.arvMountLog
c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
@@ -426,8 +432,14 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
arvMountCmd = append(arvMountCmd, "--allow-other")
}
- if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
- arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
+ if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
+ keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
+ if err != nil {
+ return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
+ }
+ arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
+ } else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
+ arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
}
collectionPaths := []string{}
@@ -483,7 +495,7 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
}
}
- if bind == "/etc/arvados/ca-certificates.crt" {
+ if bind == arvadosCertPath {
needCertMount = false
}
@@ -620,7 +632,7 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
if err != nil {
return nil, fmt.Errorf("creating temp dir: %v", err)
}
- err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
+ err = gitMount(mnt).extractTree(runner.containerClient, tmpdir, token)
if err != nil {
return nil, err
}
@@ -633,10 +645,19 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
}
if needCertMount && runner.Container.RuntimeConstraints.API {
- for _, certfile := range arvadosclient.CertFiles {
- _, err := os.Stat(certfile)
- if err == nil {
- bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
+ for _, certfile := range []string{
+ // Populated by caller, or sdk/go/arvados init(), or test suite:
+ os.Getenv("SSL_CERT_FILE"),
+ // Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
+ "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/pki/tls/cacert.pem", // OpenELEC
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
+ "/etc/ssl/cert.pem", // Alpine Linux
+ } {
+ if _, err := os.Stat(certfile); err == nil {
+ bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
break
}
}
@@ -659,6 +680,9 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
if err != nil {
return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
}
+ if runner.hoststatReporter != nil && runner.ArvMount != nil {
+ runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
+ }
for _, p := range collectionPaths {
_, err = os.Stat(p)
@@ -713,6 +737,7 @@ func (runner *ContainerRunner) stopHoststat() error {
return nil
}
runner.hoststatReporter.Stop()
+ runner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)
err := runner.hoststatLogger.Close()
if err != nil {
return fmt.Errorf("error closing hoststat logs: %v", err)
@@ -727,11 +752,20 @@ func (runner *ContainerRunner) startHoststat() error {
}
runner.hoststatLogger = NewThrottledLogger(w)
runner.hoststatReporter = &crunchstat.Reporter{
- Logger: log.New(runner.hoststatLogger, "", 0),
- CgroupRoot: runner.cgroupRoot,
+ Logger: log.New(runner.hoststatLogger, "", 0),
+ // Our own cgroup is the "host" cgroup, in the sense
+ // that it accounts for resource usage outside the
+ // container. It doesn't count _all_ resource usage on
+ // the system.
+ //
+ // TODO?: Use the furthest ancestor of our own cgroup
+ // that has stats available. (Currently crunchstat
+ // does not have that capability.)
+ Pid: os.Getpid,
PollPeriod: runner.statInterval,
}
runner.hoststatReporter.Start()
+ runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
return nil
}
@@ -742,12 +776,15 @@ func (runner *ContainerRunner) startCrunchstat() error {
}
runner.statLogger = NewThrottledLogger(w)
runner.statReporter = &crunchstat.Reporter{
- CID: runner.executor.CgroupID(),
- Logger: log.New(runner.statLogger, "", 0),
- CgroupParent: runner.expectCgroupParent,
- CgroupRoot: runner.cgroupRoot,
- PollPeriod: runner.statInterval,
- TempDir: runner.parentTemp,
+ Pid: runner.executor.Pid,
+ FS: runner.crunchstatFakeFS,
+ Logger: log.New(runner.statLogger, "", 0),
+ MemThresholds: map[string][]crunchstat.Threshold{
+ "rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
+ },
+ PollPeriod: runner.statInterval,
+ TempDir: runner.parentTemp,
+ ThresholdLogger: runner.CrunchLog,
}
runner.statReporter.Start()
return nil
@@ -1097,6 +1134,7 @@ func (runner *ContainerRunner) WaitFinish() error {
}
runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
"container": arvadosclient.Dict{"exit_code": exitcode},
}, nil)
if err != nil {
@@ -1126,6 +1164,9 @@ func (runner *ContainerRunner) WaitFinish() error {
if runner.statReporter != nil {
runner.statReporter.Stop()
+ runner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{
+ "rss": runner.Container.RuntimeConstraints.RAM,
+ })
err = runner.statLogger.Close()
if err != nil {
runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
@@ -1170,7 +1211,10 @@ func (runner *ContainerRunner) updateLogs() {
}
err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
- "container": arvadosclient.Dict{"log": saved.PortableDataHash},
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{
+ "log": saved.PortableDataHash,
+ },
}, nil)
if err != nil {
runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
@@ -1181,16 +1225,116 @@ func (runner *ContainerRunner) updateLogs() {
}
}
-func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
- var updated arvados.Container
+var spotInterruptionCheckInterval = 5 * time.Second
+var ec2MetadataBaseURL = "http://169.254.169.254"
+
+const ec2TokenTTL = time.Second * 21600
+
+func (runner *ContainerRunner) checkSpotInterruptionNotices() {
+ type ec2metadata struct {
+ Action string `json:"action"`
+ Time time.Time `json:"time"`
+ }
+ runner.CrunchLog.Printf("Checking for spot interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
+ var metadata ec2metadata
+ var token string
+ var tokenExp time.Time
+ check := func() error {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+ if token == "" || tokenExp.Sub(time.Now()) < time.Minute {
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+"/latest/api/token", nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", fmt.Sprintf("%d", int(ec2TokenTTL/time.Second)))
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s", resp.Status)
+ }
+ newtoken, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ token = strings.TrimSpace(string(newtoken))
+ tokenExp = time.Now().Add(ec2TokenTTL)
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+"/latest/meta-data/spot/instance-action", nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("X-aws-ec2-metadata-token", token)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ metadata = ec2metadata{}
+ switch resp.StatusCode {
+ case http.StatusOK:
+ break
+ case http.StatusNotFound:
+ // "If Amazon EC2 is not preparing to stop or
+ // terminate the instance, or if you
+ // terminated the instance yourself,
+ // instance-action is not present in the
+ // instance metadata and you receive an HTTP
+ // 404 error when you try to retrieve it."
+ return nil
+ case http.StatusUnauthorized:
+ token = ""
+ return fmt.Errorf("%s", resp.Status)
+ default:
+ return fmt.Errorf("%s", resp.Status)
+ }
+ err = json.NewDecoder(resp.Body).Decode(&metadata)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+ failures := 0
+ var lastmetadata ec2metadata
+ for range time.NewTicker(spotInterruptionCheckInterval).C {
+ err := check()
+ if err != nil {
+ runner.CrunchLog.Printf("Error checking spot interruptions: %s", err)
+ failures++
+ if failures > 5 {
+ runner.CrunchLog.Printf("Giving up on checking spot interruptions after too many consecutive failures")
+ return
+ }
+ continue
+ }
+ failures = 0
+ if metadata != lastmetadata {
+ lastmetadata = metadata
+ text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
+ runner.CrunchLog.Printf("%s", text)
+ runner.updateRuntimeStatus(arvadosclient.Dict{
+ "warning": "preemption notice",
+ "warningDetail": text,
+ "preemptionNotice": text,
+ })
+ if proc, err := os.FindProcess(os.Getpid()); err == nil {
+ // trigger updateLogs
+ proc.Signal(syscall.SIGUSR1)
+ }
+ }
+ }
+}
+
+func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
"container": arvadosclient.Dict{
- "runtime_status": arvadosclient.Dict{
- "warning": "arv-mount: " + pattern,
- "warningDetail": text,
- },
+ "runtime_status": status,
},
- }, &updated)
+ }, nil)
if err != nil {
runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
}
@@ -1203,7 +1347,9 @@ func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) er
// Output may have been set directly by the container, so
// refresh the container record to check.
err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
- nil, &runner.Container)
+ arvadosclient.Dict{
+ "select": []string{"output"},
+ }, &runner.Container)
if err != nil {
return err
}
@@ -1216,7 +1362,6 @@ func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) er
txt, err := (&copier{
client: runner.containerClient,
- arvClient: runner.ContainerArvClient,
keepClient: runner.ContainerKeepClient,
hostOutputDir: runner.HostOutputDir,
ctrOutputDir: runner.Container.OutputPath,
@@ -1242,6 +1387,7 @@ func (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) er
var resp arvados.Collection
err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
"ensure_unique_name": true,
+ "select": []string{"portable_data_hash"},
"collection": arvadosclient.Dict{
"is_trashed": true,
"name": "output for " + runner.Container.UUID,
@@ -1368,6 +1514,8 @@ func (runner *ContainerRunner) CommitLogs() error {
return nil
}
+// Create/update the log collection. Return value has UUID and
+// PortableDataHash fields populated, but others may be blank.
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
runner.logMtx.Lock()
defer runner.logMtx.Unlock()
@@ -1392,11 +1540,20 @@ func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.C
if final {
updates["is_trashed"] = true
} else {
- exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+ // We set trash_at so this collection gets
+ // automatically cleaned up eventually. It used to be
+ // 12 hours, but we had a situation where the API
+ // server was down over a weekend while the containers
+ // kept running, such that the log collection got
+ // trashed, so now we make it 2 weeks. refs #20378
+ exp := time.Now().Add(time.Duration(24*14) * time.Hour)
updates["trash_at"] = exp
updates["delete_at"] = exp
}
- reqBody := arvadosclient.Dict{"collection": updates}
+ reqBody := arvadosclient.Dict{
+ "select": []string{"uuid", "portable_data_hash"},
+ "collection": updates,
+ }
var err2 error
if runner.logUUID == "" {
reqBody["ensure_unique_name"] = true
@@ -1415,14 +1572,28 @@ func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.C
}
// UpdateContainerRunning updates the container state to "Running"
-func (runner *ContainerRunner) UpdateContainerRunning() error {
+func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
runner.cStateLock.Lock()
defer runner.cStateLock.Unlock()
if runner.cCancelled {
return ErrCancelled
}
- return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
- arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
+ updates := arvadosclient.Dict{
+ "gateway_address": runner.gateway.Address,
+ "state": "Running",
+ }
+ if logId != "" {
+ updates["log"] = logId
+ }
+ return runner.DispatcherArvClient.Update(
+ "containers",
+ runner.Container.UUID,
+ arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": updates,
+ },
+ nil,
+ )
}
// ContainerToken returns the api_token the container (and any
@@ -1457,7 +1628,11 @@ func (runner *ContainerRunner) UpdateContainerFinal() error {
if runner.finalState == "Complete" && runner.OutputPDH != nil {
update["output"] = *runner.OutputPDH
}
- return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+ update["cost"] = runner.calculateCost(time.Now())
+ return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": update,
+ }, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
@@ -1489,6 +1664,7 @@ func (runner *ContainerRunner) Run() (err error) {
runner.CrunchLog.Printf("Using FUSE mount: %s", v)
runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
+ runner.costStartTime = time.Now()
hostname, hosterr := os.Hostname()
if hosterr != nil {
@@ -1497,6 +1673,12 @@ func (runner *ContainerRunner) Run() (err error) {
runner.CrunchLog.Printf("Executing on host '%s'", hostname)
}
+ sigusr2 := make(chan os.Signal, 1)
+ signal.Notify(sigusr2, syscall.SIGUSR2)
+ defer signal.Stop(sigusr2)
+ runner.loadPrices()
+ go runner.handleSIGUSR2(sigusr2)
+
runner.finalState = "Queued"
defer func() {
@@ -1563,6 +1745,9 @@ func (runner *ContainerRunner) Run() (err error) {
if err != nil {
return
}
+ if runner.keepstore != nil {
+ runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
+ }
// set up FUSE mount and binds
bindmounts, err = runner.SetupMounts()
@@ -1605,7 +1790,14 @@ func (runner *ContainerRunner) Run() (err error) {
return
}
- err = runner.UpdateContainerRunning()
+ logCollection, err := runner.saveLogCollection(false)
+ var logId string
+ if err == nil {
+ logId = logCollection.PortableDataHash
+ } else {
+ runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
+ }
+ err = runner.UpdateContainerRunning(logId)
if err != nil {
return
}
@@ -1727,16 +1919,16 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
log := log.New(stderr, "", 0)
flags := flag.NewFlagSet(prog, flag.ContinueOnError)
statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
- cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
- cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
- cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
+ flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
+ flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
+ cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
- list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
+ list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
@@ -1772,11 +1964,11 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
switch {
case *detach && !ignoreDetachFlag:
- return Detach(containerUUID, prog, args, os.Stdin, os.Stdout, os.Stderr)
+ return Detach(containerUUID, prog, args, stdin, stdout, stderr)
case *kill >= 0:
- return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
+ return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
case *list:
- return ListProcesses(os.Stdout, os.Stderr)
+ return ListProcesses(stdin, stdout, stderr)
}
if len(containerUUID) != 27 {
@@ -1814,7 +2006,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
time.Sleep(*sleep)
if *caCertsPath != "" {
- arvadosclient.CertFiles = []string{*caCertsPath}
+ os.Setenv("SSL_CERT_FILE", *caCertsPath)
}
keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
@@ -1831,14 +2023,15 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
log.Printf("%s: %v", containerUUID, err)
return 1
}
- api.Retries = 8
+ // arvadosclient now interprets Retries=10 to mean
+ // Timeout=10m, retrying with exponential backoff + jitter.
+ api.Retries = 10
kc, err := keepclient.MakeKeepClient(api)
if err != nil {
log.Printf("%s: %v", containerUUID, err)
return 1
}
- kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
kc.Retries = 4
cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
@@ -1847,6 +2040,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
return 1
}
+ cr.keepstore = keepstore
if keepstore == nil {
// Log explanation (if any) for why we're not running
// a local keepstore.
@@ -1918,6 +2112,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
ContainerUUID: containerUUID,
Target: cr.executor,
Log: cr.CrunchLog,
+ LogCollection: cr.LogCollection,
}
if gwListen == "" {
// Direct connection won't work, so we use the
@@ -1928,7 +2123,10 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
cr.gateway.UpdateTunnelURL = func(url string) {
cr.gateway.Address = "tunnel " + url
cr.DispatcherArvClient.Update("containers", containerUUID,
- arvadosclient.Dict{"container": arvadosclient.Dict{"gateway_address": cr.gateway.Address}}, nil)
+ arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{"gateway_address": cr.gateway.Address},
+ }, nil)
}
}
err = cr.gateway.Start()
@@ -1946,19 +2144,20 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
cr.parentTemp = parentTemp
cr.statInterval = *statInterval
- cr.cgroupRoot = *cgroupRoot
- cr.expectCgroupParent = *cgroupParent
cr.enableMemoryLimit = *enableMemoryLimit
cr.enableNetwork = *enableNetwork
cr.networkMode = *networkMode
if *cgroupParentSubsystem != "" {
- p, err := findCgroup(*cgroupParentSubsystem)
+ p, err := findCgroup(os.DirFS("/"), *cgroupParentSubsystem)
if err != nil {
log.Printf("fatal: cgroup parent subsystem: %s", err)
return 1
}
cr.setCgroupParent = p
- cr.expectCgroupParent = p
+ }
+
+ if conf.EC2SpotCheck {
+ go cr.checkSpotInterruptionNotices()
}
runerr := cr.Run()
@@ -2002,7 +2201,9 @@ func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
return conf
}
- arv.Retries = 8
+ // arvadosclient now interprets Retries=10 to mean
+ // Timeout=10m, retrying with exponential backoff + jitter.
+ arv.Retries = 10
var ctr arvados.Container
err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
if err != nil {
@@ -2055,9 +2256,14 @@ func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, er
}
// Rather than have an alternate way to tell keepstore how
- // many buffers to use when starting it this way, we just
- // modify the cluster configuration that we feed it on stdin.
- configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers
+ // many buffers to use, etc., when starting it this way, we
+ // just modify the cluster configuration that we feed it on
+ // stdin.
+ ccfg := *configData.Cluster
+ ccfg.API.MaxKeepBlobBuffers = configData.KeepBuffers
+ ccfg.Collections.BlobTrash = false
+ ccfg.Collections.BlobTrashConcurrency = 0
+ ccfg.Collections.BlobDeleteConcurrency = 0
localaddr := localKeepstoreAddr()
ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
@@ -2077,7 +2283,7 @@ func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, er
var confJSON bytes.Buffer
err = json.NewEncoder(&confJSON).Encode(arvados.Config{
Clusters: map[string]arvados.Cluster{
- configData.Cluster.ClusterID: *configData.Cluster,
+ ccfg.ClusterID: ccfg,
},
})
if err != nil {
@@ -2202,3 +2408,100 @@ func localKeepstoreAddr() string {
})
return ips[0].String()
}
+
+func (cr *ContainerRunner) loadPrices() {
+ buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ cr.CrunchLog.Printf("loadPrices: read: %s", err)
+ }
+ return
+ }
+ var prices []cloud.InstancePrice
+ err = json.Unmarshal(buf, &prices)
+ if err != nil {
+ cr.CrunchLog.Printf("loadPrices: decode: %s", err)
+ return
+ }
+ cr.pricesLock.Lock()
+ defer cr.pricesLock.Unlock()
+ var lastKnown time.Time
+ if len(cr.prices) > 0 {
+ lastKnown = cr.prices[0].StartTime
+ }
+ cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
+ for i := len(cr.prices) - 1; i >= 0; i-- {
+ price := cr.prices[i]
+ if price.StartTime.After(lastKnown) {
+ cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
+ }
+ }
+}
+
+func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
+ cr.pricesLock.Lock()
+ defer cr.pricesLock.Unlock()
+
+ // First, make a "prices" slice with the real data as far back
+ // as it goes, and (if needed) a "since the beginning of time"
+ // placeholder containing a reasonable guess about what the
+ // price was between cr.costStartTime and the earliest real
+ // data point.
+ prices := cr.prices
+ if len(prices) == 0 {
+ // use price info in InstanceType record initially
+ // provided by cloud dispatcher
+ var p float64
+ var it arvados.InstanceType
+ if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
+ p = it.Price
+ }
+ prices = []cloud.InstancePrice{{Price: p}}
+ } else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
+ // guess earlier pricing was the same as the earliest
+ // price we know about
+ filler := prices[len(prices)-1]
+ filler.StartTime = time.Time{}
+ prices = append(prices, filler)
+ }
+
+ // Now that our history of price changes goes back at least as
+ // far as cr.costStartTime, add up the costs for each
+ // interval.
+ cost := 0.0
+ spanEnd := now
+ for _, ip := range prices {
+ spanStart := ip.StartTime
+ if spanStart.After(now) {
+ // pricing information from the future -- not
+ // expected from AWS, but possible in
+ // principle, and exercised by tests.
+ continue
+ }
+ last := false
+ if spanStart.Before(cr.costStartTime) {
+ spanStart = cr.costStartTime
+ last = true
+ }
+ cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
+ if last {
+ break
+ }
+ spanEnd = spanStart
+ }
+
+ return cost
+}
+
+func (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {
+ for range sigchan {
+ runner.loadPrices()
+ update := arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{
+ "cost": runner.calculateCost(time.Now()),
+ },
+ }
+ runner.DispatcherArvClient.Update("containers", runner.Container.UUID, update, nil)
+ }
+}
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 76289b951d..276dd36661 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -6,30 +6,41 @@ package crunchrun
import (
"bytes"
+ "context"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "net/http/httputil"
+ "net/url"
"os"
"os/exec"
+ "path"
"regexp"
"runtime/pprof"
"strings"
"sync"
+ "sync/atomic"
"syscall"
"testing"
"time"
+ "git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/manifest"
- "golang.org/x/net/context"
. "gopkg.in/check.v1"
+ git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
+ git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)
// Gocheck boilerplate
@@ -37,6 +48,8 @@ func TestCrunchExec(t *testing.T) {
TestingT(t)
}
+const logLineStart = `(?m)(.*\n)*\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d+Z `
+
var _ = Suite(&TestSuite{})
type TestSuite struct {
@@ -48,6 +61,20 @@ type TestSuite struct {
keepmountTmp []string
testDispatcherKeepClient KeepTestClient
testContainerKeepClient KeepTestClient
+ debian12MemoryCurrent int64
+ debian12SwapCurrent int64
+}
+
+func (s *TestSuite) SetUpSuite(c *C) {
+ buf, err := os.ReadFile("../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current")
+ c.Assert(err, IsNil)
+ _, err = fmt.Sscanf(string(buf), "%d", &s.debian12MemoryCurrent)
+ c.Assert(err, IsNil)
+
+ buf, err = os.ReadFile("../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current")
+ c.Assert(err, IsNil)
+ _, err = fmt.Sscanf(string(buf), "%d", &s.debian12SwapCurrent)
+ c.Assert(err, IsNil)
}
func (s *TestSuite) SetUpTest(c *C) {
@@ -119,7 +146,7 @@ type stubExecutor struct {
stopErr error
stopped bool
closed bool
- runFunc func()
+ runFunc func() int
exit chan int
}
@@ -131,10 +158,14 @@ func (e *stubExecutor) LoadImage(imageId string, tarball string, container arvad
func (e *stubExecutor) Runtime() string { return "stub" }
func (e *stubExecutor) Version() string { return "stub " + cmd.Version.String() }
func (e *stubExecutor) Create(spec containerSpec) error { e.created = spec; return e.createErr }
-func (e *stubExecutor) Start() error { e.exit = make(chan int, 1); go e.runFunc(); return e.startErr }
-func (e *stubExecutor) CgroupID() string { return "cgroupid" }
-func (e *stubExecutor) Stop() error { e.stopped = true; go func() { e.exit <- -1 }(); return e.stopErr }
-func (e *stubExecutor) Close() { e.closed = true }
+func (e *stubExecutor) Start() error {
+ e.exit = make(chan int, 1)
+ go func() { e.exit <- e.runFunc() }()
+ return e.startErr
+}
+func (e *stubExecutor) Pid() int { return 1115883 } // matches pid in ../crunchstat/testdata/debian12/proc/
+func (e *stubExecutor) Stop() error { e.stopped = true; go func() { e.exit <- -1 }(); return e.stopErr }
+func (e *stubExecutor) Close() { e.closed = true }
func (e *stubExecutor) Wait(context.Context) (int, error) {
return <-e.exit, e.waitErr
}
@@ -188,9 +219,10 @@ func (client *ArvTestClient) Create(resourceType string,
if resourceType == "collections" && output != nil {
mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
+ md5sum := md5.Sum([]byte(mt))
outmap := output.(*arvados.Collection)
- outmap.PortableDataHash = fmt.Sprintf("%x+%d", md5.Sum([]byte(mt)), len(mt))
- outmap.UUID = fmt.Sprintf("zzzzz-4zz18-%15.15x", md5.Sum([]byte(mt)))
+ outmap.PortableDataHash = fmt.Sprintf("%x+%d", md5sum, len(mt))
+ outmap.UUID = fmt.Sprintf("zzzzz-4zz18-%015x", md5sum[:7])
}
return nil
@@ -336,9 +368,6 @@ func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
return 0, errors.New("not implemented")
}
-func (client *KeepTestClient) ClearBlockCache() {
-}
-
func (client *KeepTestClient) Close() {
client.Content = nil
}
@@ -401,6 +430,67 @@ func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename s
return nil, nil
}
+type apiStubServer struct {
+ server *httptest.Server
+ proxy *httputil.ReverseProxy
+ intercept func(http.ResponseWriter, *http.Request) bool
+
+ container arvados.Container
+ logs map[string]string
+}
+
+func apiStub() (*arvados.Client, *apiStubServer) {
+ client := arvados.NewClientFromEnv()
+ apistub := &apiStubServer{}
+ apistub.server = httptest.NewTLSServer(apistub)
+ apistub.proxy = httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "https", Host: client.APIHost})
+ if client.Insecure {
+ apistub.proxy.Transport = arvados.InsecureHTTPClient.Transport
+ }
+ client.APIHost = apistub.server.Listener.Addr().String()
+ return client, apistub
+}
+
+func (apistub *apiStubServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if apistub.intercept != nil && apistub.intercept(w, r) {
+ return
+ }
+ if r.Method == "POST" && r.URL.Path == "/arvados/v1/logs" {
+ var body struct {
+ Log struct {
+ EventType string `json:"event_type"`
+ Properties struct {
+ Text string
+ }
+ }
+ }
+ json.NewDecoder(r.Body).Decode(&body)
+ apistub.logs[body.Log.EventType] += body.Log.Properties.Text
+ return
+ }
+ if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+hwPDH {
+ json.NewEncoder(w).Encode(arvados.Collection{ManifestText: hwManifest})
+ return
+ }
+ if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+otherPDH {
+ json.NewEncoder(w).Encode(arvados.Collection{ManifestText: otherManifest})
+ return
+ }
+ if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+normalizedWithSubdirsPDH {
+ json.NewEncoder(w).Encode(arvados.Collection{ManifestText: normalizedManifestWithSubdirs})
+ return
+ }
+ if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+denormalizedWithSubdirsPDH {
+ json.NewEncoder(w).Encode(arvados.Collection{ManifestText: denormalizedManifestWithSubdirs})
+ return
+ }
+ if r.Method == "GET" && r.URL.Path == "/arvados/v1/containers/"+apistub.container.UUID {
+ json.NewEncoder(w).Encode(apistub.container)
+ return
+ }
+ apistub.proxy.ServeHTTP(w, r)
+}
+
func (s *TestSuite) TestLoadImage(c *C) {
s.runner.Container.ContainerImage = arvadostest.DockerImage112PDH
s.runner.Container.Mounts = map[string]arvados.Mount{
@@ -538,9 +628,9 @@ func dockerLog(fd byte, msg string) []byte {
}
func (s *TestSuite) TestRunContainer(c *C) {
- s.executor.runFunc = func() {
+ s.executor.runFunc = func() int {
fmt.Fprintf(s.executor.created.Stdout, "Hello world\n")
- s.executor.exit <- 0
+ return 0
}
var logs TestLogs
@@ -594,7 +684,7 @@ func (s *TestSuite) TestUpdateContainerRunning(c *C) {
cr, err := NewContainerRunner(s.client, api, kc, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
c.Assert(err, IsNil)
- err = cr.UpdateContainerRunning()
+ err = cr.UpdateContainerRunning("")
c.Check(err, IsNil)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
@@ -641,7 +731,7 @@ func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
// Used by the TestFullRun*() test below to DRY up boilerplate setup to do full
// dress rehearsal of the Run() function, starting from a JSON container record.
-func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exitCode int, fn func()) (*ArvTestClient, *ContainerRunner, string) {
+func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, fn func() int) (*ArvTestClient, *ContainerRunner, string) {
err := json.Unmarshal([]byte(record), &s.api.Container)
c.Assert(err, IsNil)
initialState := s.api.Container.State
@@ -655,10 +745,7 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
c.Assert(err, IsNil)
c.Logf("SecretMounts decoded %v json %q", sm, secretMounts)
- s.executor.runFunc = func() {
- fn()
- s.executor.exit <- exitCode
- }
+ s.executor.runFunc = fn
s.runner.statInterval = 100 * time.Millisecond
s.runner.containerWatchdogInterval = time.Second
@@ -675,8 +762,9 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
}
return d, err
}
+ client, _ := apiStub()
s.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
- return &ArvTestClient{secretMounts: secretMounts}, &s.testContainerKeepClient, nil, nil
+ return &ArvTestClient{secretMounts: secretMounts}, &s.testContainerKeepClient, client, nil
}
if extraMounts != nil && len(extraMounts) > 0 {
@@ -729,7 +817,7 @@ func (s *TestSuite) TestFullRunHello(c *C) {
"runtime_constraints": {"vcpus":1,"ram":1000000},
"state": "Locked",
"output_storage_classes": ["default"]
-}`, nil, 0, func() {
+}`, nil, func() int {
c.Check(s.executor.created.Command, DeepEquals, []string{"echo", "hello world"})
c.Check(s.executor.created.Image, Equals, "sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678")
c.Check(s.executor.created.Env, DeepEquals, map[string]string{"foo": "bar", "baz": "waz"})
@@ -739,6 +827,7 @@ func (s *TestSuite) TestFullRunHello(c *C) {
c.Check(s.executor.created.EnableNetwork, Equals, false)
c.Check(s.executor.created.CUDADeviceCount, Equals, 0)
fmt.Fprintln(s.executor.created.Stdout, "hello world")
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -761,14 +850,92 @@ func (s *TestSuite) TestRunAlreadyRunning(c *C) {
"runtime_constraints": {},
"scheduling_parameters":{"max_run_time": 1},
"state": "Running"
-}`, nil, 2, func() {
+}`, nil, func() int {
ran = true
+ return 2
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), IsNil)
c.Check(s.api.CalledWith("container.state", "Complete"), IsNil)
c.Check(ran, Equals, false)
}
+func ec2MetadataServerStub(c *C, token *string, failureRate float64, stoptime *atomic.Value) *httptest.Server {
+ failedOnce := false
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !failedOnce || rand.Float64() < failureRate {
+ w.WriteHeader(http.StatusServiceUnavailable)
+ failedOnce = true
+ return
+ }
+ switch r.URL.Path {
+ case "/latest/api/token":
+ fmt.Fprintln(w, *token)
+ case "/latest/meta-data/spot/instance-action":
+ if r.Header.Get("X-aws-ec2-metadata-token") != *token {
+ w.WriteHeader(http.StatusUnauthorized)
+ } else if t, _ := stoptime.Load().(time.Time); t.IsZero() {
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ fmt.Fprintf(w, `{"action":"stop","time":"%s"}`, t.Format(time.RFC3339))
+ }
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
+func (s *TestSuite) TestSpotInterruptionNotice(c *C) {
+ s.testSpotInterruptionNotice(c, 0.1)
+}
+
+func (s *TestSuite) TestSpotInterruptionNoticeNotAvailable(c *C) {
+ s.testSpotInterruptionNotice(c, 1)
+}
+
+func (s *TestSuite) testSpotInterruptionNotice(c *C, failureRate float64) {
+ var stoptime atomic.Value
+ token := "fake-ec2-metadata-token"
+ stub := ec2MetadataServerStub(c, &token, failureRate, &stoptime)
+ defer stub.Close()
+
+ defer func(i time.Duration, u string) {
+ spotInterruptionCheckInterval = i
+ ec2MetadataBaseURL = u
+ }(spotInterruptionCheckInterval, ec2MetadataBaseURL)
+ spotInterruptionCheckInterval = time.Second / 8
+ ec2MetadataBaseURL = stub.URL
+
+ go s.runner.checkSpotInterruptionNotices()
+ s.fullRunHelper(c, `{
+ "command": ["sleep", "3"],
+ "container_image": "`+arvadostest.DockerImage112PDH+`",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {},
+ "state": "Locked"
+}`, nil, func() int {
+ time.Sleep(time.Second)
+ stoptime.Store(time.Now().Add(time.Minute).UTC())
+ token = "different-fake-ec2-metadata-token"
+ time.Sleep(time.Second)
+ return 0
+ })
+ c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Checking for spot interruptions every 125ms using instance metadata at http://.*`)
+ c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Error checking spot interruptions: 503 Service Unavailable.*`)
+ if failureRate == 1 {
+ c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Giving up on checking spot interruptions after too many consecutive failures.*`)
+ } else {
+ text := `Cloud provider scheduled instance stop at ` + stoptime.Load().(time.Time).Format(time.RFC3339)
+ c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*`+text+`.*`)
+ c.Check(s.api.CalledWith("container.runtime_status.warning", "preemption notice"), NotNil)
+ c.Check(s.api.CalledWith("container.runtime_status.warningDetail", text), NotNil)
+ c.Check(s.api.CalledWith("container.runtime_status.preemptionNotice", text), NotNil)
+ }
+}
+
func (s *TestSuite) TestRunTimeExceeded(c *C) {
s.fullRunHelper(c, `{
"command": ["sleep", "3"],
@@ -781,8 +948,9 @@ func (s *TestSuite) TestRunTimeExceeded(c *C) {
"runtime_constraints": {},
"scheduling_parameters":{"max_run_time": 1},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
time.Sleep(3 * time.Second)
+ return 0
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
@@ -798,8 +966,9 @@ func (s *TestSuite) TestContainerWaitFails(c *C) {
"output_path": "/tmp",
"priority": 1,
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
s.executor.waitErr = errors.New("Container is not running")
+ return 0
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
@@ -807,6 +976,7 @@ func (s *TestSuite) TestContainerWaitFails(c *C) {
}
func (s *TestSuite) TestCrunchstat(c *C) {
+ s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
s.fullRunHelper(c, `{
"command": ["sleep", "1"],
"container_image": "`+arvadostest.DockerImage112PDH+`",
@@ -817,25 +987,19 @@ func (s *TestSuite) TestCrunchstat(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
- }`, nil, 0, func() {
+ }`, nil, func() int {
time.Sleep(time.Second)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- // We didn't actually start a container, so crunchstat didn't
- // find accounting files and therefore didn't log any stats.
- // It should have logged a "can't find accounting files"
- // message after one poll interval, though, so we can confirm
- // it's alive:
c.Assert(s.api.Logs["crunchstat"], NotNil)
- c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files have not appeared after 100ms.*`)
+ c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*mem \d+ swap \d+ pgmajfault \d+ rss.*`)
- // The "files never appeared" log assures us that we called
- // (*crunchstat.Reporter)Stop(), and that we set it up with
- // the correct container ID "abcde":
- c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files never appeared for cgroupid\n`)
+ // Check that we called (*crunchstat.Reporter)Stop().
+ c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Maximum crunch-run memory rss usage was \d+ bytes\n.*`)
}
func (s *TestSuite) TestNodeInfoLog(c *C) {
@@ -850,10 +1014,10 @@ func (s *TestSuite) TestNodeInfoLog(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
- }`, nil, 0,
- func() {
- time.Sleep(time.Second)
- })
+ }`, nil, func() int {
+ time.Sleep(time.Second)
+ return 0
+ })
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
@@ -884,9 +1048,9 @@ func (s *TestSuite) TestLogVersionAndRuntime(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
- }`, nil, 0,
- func() {
- })
+ }`, nil, func() int {
+ return 0
+ })
c.Assert(s.api.Logs["crunch-run"], NotNil)
c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*crunch-run \S+ \(go\S+\) start.*`)
@@ -895,6 +1059,106 @@ func (s *TestSuite) TestLogVersionAndRuntime(c *C) {
c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Using container runtime: stub.*`)
}
+func (s *TestSuite) testLogRSSThresholds(c *C, ram int64, expected []int, notExpected int) {
+ s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
+ s.fullRunHelper(c, `{
+ "command": ["true"],
+ "container_image": "`+arvadostest.DockerImage112PDH+`",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"ram": `+fmt.Sprintf("%d", ram)+`},
+ "state": "Locked"
+ }`, nil, func() int { return 0 })
+ c.Logf("=== crunchstat logs\n%s\n", s.api.Logs["crunchstat"].String())
+ logs := s.api.Logs["crunch-run"].String()
+ pattern := logLineStart + `Container using over %d%% of memory \(rss %d/%d bytes\)`
+ var threshold int
+ for _, threshold = range expected {
+ c.Check(logs, Matches, fmt.Sprintf(pattern, threshold, s.debian12MemoryCurrent, ram))
+ }
+ if notExpected > threshold {
+ c.Check(logs, Not(Matches), fmt.Sprintf(pattern, notExpected, s.debian12MemoryCurrent, ram))
+ }
+}
+
+func (s *TestSuite) TestLogNoRSSThresholds(c *C) {
+ s.testLogRSSThresholds(c, s.debian12MemoryCurrent*10, []int{}, 90)
+}
+
+func (s *TestSuite) TestLogSomeRSSThresholds(c *C) {
+ onePercentRSS := s.debian12MemoryCurrent / 100
+ s.testLogRSSThresholds(c, 102*onePercentRSS, []int{90, 95}, 99)
+}
+
+func (s *TestSuite) TestLogAllRSSThresholds(c *C) {
+ s.testLogRSSThresholds(c, s.debian12MemoryCurrent, []int{90, 95, 99}, 0)
+}
+
+func (s *TestSuite) TestLogMaximaAfterRun(c *C) {
+ s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
+ s.runner.parentTemp = c.MkDir()
+ s.fullRunHelper(c, `{
+ "command": ["true"],
+ "container_image": "`+arvadostest.DockerImage112PDH+`",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"ram": `+fmt.Sprintf("%d", s.debian12MemoryCurrent*10)+`},
+ "state": "Locked"
+ }`, nil, func() int { return 0 })
+ logs := s.api.Logs["crunch-run"].String()
+ for _, expected := range []string{
+ `Maximum disk usage was \d+%, \d+/\d+ bytes`,
+ fmt.Sprintf(`Maximum container memory swap usage was %d bytes`, s.debian12SwapCurrent),
+ `Maximum container memory pgmajfault usage was \d+ faults`,
+ fmt.Sprintf(`Maximum container memory rss usage was 10%%, %d/%d bytes`, s.debian12MemoryCurrent, s.debian12MemoryCurrent*10),
+ `Maximum crunch-run memory rss usage was \d+ bytes`,
+ } {
+ c.Check(logs, Matches, logLineStart+expected)
+ }
+}
+
+func (s *TestSuite) TestCommitNodeInfoBeforeStart(c *C) {
+ var collection_create, container_update arvadosclient.Dict
+ s.fullRunHelper(c, `{
+ "command": ["true"],
+ "container_image": "`+arvadostest.DockerImage112PDH+`",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {},
+ "state": "Locked",
+ "uuid": "zzzzz-dz642-202301121543210"
+ }`, nil, func() int {
+ collection_create = s.api.CalledWith("ensure_unique_name", true)
+ container_update = s.api.CalledWith("container.state", "Running")
+ return 0
+ })
+
+ c.Assert(collection_create, NotNil)
+ log_collection := collection_create["collection"].(arvadosclient.Dict)
+ c.Check(log_collection["name"], Equals, "logs for zzzzz-dz642-202301121543210")
+ manifest_text := log_collection["manifest_text"].(string)
+ // We check that the file size is at least two digits as an easy way to
+ // check the file isn't empty.
+ c.Check(manifest_text, Matches, `\. .+ \d+:\d{2,}:node-info\.txt( .+)?\n`)
+ c.Check(manifest_text, Matches, `\. .+ \d+:\d{2,}:node\.json( .+)?\n`)
+
+ c.Assert(container_update, NotNil)
+ // As of Arvados 2.5.0, the container update must specify its log in PDH
+ // format for the API server to propagate it to container requests, which
+ // is what we care about for this test.
+ expect_pdh := fmt.Sprintf("%x+%d", md5.Sum([]byte(manifest_text)), len(manifest_text))
+ c.Check(container_update["container"].(arvadosclient.Dict)["log"], Equals, expect_pdh)
+}
+
func (s *TestSuite) TestContainerRecordLog(c *C) {
s.fullRunHelper(c, `{
"command": ["sleep", "1"],
@@ -906,9 +1170,10 @@ func (s *TestSuite) TestContainerRecordLog(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
- }`, nil, 0,
- func() {
+ }`, nil,
+ func() int {
time.Sleep(time.Second)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -929,9 +1194,10 @@ func (s *TestSuite) TestFullRunStderr(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 1, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, "hello")
fmt.Fprintln(s.executor.created.Stderr, "world")
+ return 1
})
final := s.api.CalledWith("container.state", "Complete")
@@ -954,8 +1220,9 @@ func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintf(s.executor.created.Stdout, "workdir=%q", s.executor.created.WorkingDir)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -975,8 +1242,9 @@ func (s *TestSuite) TestFullRunSetCwd(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.WorkingDir)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -996,8 +1264,9 @@ func (s *TestSuite) TestFullRunSetOutputStorageClasses(c *C) {
"runtime_constraints": {},
"state": "Locked",
"output_storage_classes": ["foo", "bar"]
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.WorkingDir)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -1019,8 +1288,9 @@ func (s *TestSuite) TestEnableCUDADeviceCount(c *C) {
"runtime_constraints": {"cuda": {"device_count": 2}},
"state": "Locked",
"output_storage_classes": ["foo", "bar"]
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, "ok")
+ return 0
})
c.Check(s.executor.created.CUDADeviceCount, Equals, 2)
}
@@ -1037,25 +1307,30 @@ func (s *TestSuite) TestEnableCUDAHardwareCapability(c *C) {
"runtime_constraints": {"cuda": {"hardware_capability": "foo"}},
"state": "Locked",
"output_storage_classes": ["foo", "bar"]
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, "ok")
+ return 0
})
c.Check(s.executor.created.CUDADeviceCount, Equals, 0)
}
func (s *TestSuite) TestStopOnSignal(c *C) {
- s.executor.runFunc = func() {
+ s.executor.runFunc = func() int {
s.executor.created.Stdout.Write([]byte("foo\n"))
s.runner.SigChan <- syscall.SIGINT
+ time.Sleep(10 * time.Second)
+ return 0
}
s.testStopContainer(c)
}
func (s *TestSuite) TestStopOnArvMountDeath(c *C) {
- s.executor.runFunc = func() {
+ s.executor.runFunc = func() int {
s.executor.created.Stdout.Write([]byte("foo\n"))
s.runner.ArvMountExit <- nil
close(s.runner.ArvMountExit)
+ time.Sleep(10 * time.Second)
+ return 0
}
s.runner.ArvMountExit = make(chan error)
s.testStopContainer(c)
@@ -1114,8 +1389,9 @@ func (s *TestSuite) TestFullRunSetEnv(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
fmt.Fprintf(s.executor.created.Stdout, "%v", s.executor.created.Env)
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -1134,11 +1410,11 @@ func (am *ArvMountCmdLine) ArvMountTest(c []string, token string) (*exec.Cmd, er
return nil, nil
}
-func stubCert(temp string) string {
+func stubCert(c *C, temp string) string {
path := temp + "/ca-certificates.crt"
- crt, _ := os.Create(path)
- crt.Close()
- arvadosclient.CertFiles = []string{path}
+ err := os.WriteFile(path, []byte{}, 0666)
+ c.Assert(err, IsNil)
+ os.Setenv("SSL_CERT_FILE", path)
return path
}
@@ -1146,13 +1422,14 @@ func (s *TestSuite) TestSetupMounts(c *C) {
cr := s.runner
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
+ cr.containerClient, _ = apiStub()
cr.ContainerArvClient = &ArvTestClient{}
cr.ContainerKeepClient = &KeepTestClient{}
cr.Container.OutputStorageClasses = []string{"default"}
realTemp := c.MkDir()
certTemp := c.MkDir()
- stubCertPath := stubCert(certTemp)
+ stubCertPath := stubCert(c, certTemp)
cr.parentTemp = realTemp
i := 0
@@ -1299,7 +1576,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
bindmounts, err := cr.SetupMounts()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
- "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
+ "--read-write", "--storage-classes", "default", "--crunchstat-interval=5", "--ram-cache",
"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{
"/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
@@ -1382,7 +1659,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
bindmounts, err := cr.SetupMounts()
c.Check(err, IsNil)
c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
- "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
+ "--read-write", "--storage-classes", "default", "--crunchstat-interval=5", "--ram-cache",
"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
c.Check(bindmounts, DeepEquals, map[string]bindmount{
"/tmp": {realTemp + "/tmp2", false},
@@ -1468,7 +1745,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
{
i = 0
cr.ArvMountPoint = ""
- (*GitMountSuite)(nil).useTestGitServer(c)
+ git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
cr.token = arvadostest.ActiveToken
cr.Container.Mounts = make(map[string]arvados.Mount)
cr.Container.Mounts = map[string]arvados.Mount{
@@ -1526,8 +1803,9 @@ func (s *TestSuite) TestStdout(c *C) {
"state": "Locked"
}`
- s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ s.fullRunHelper(c, helperRecord, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -1536,7 +1814,7 @@ func (s *TestSuite) TestStdout(c *C) {
}
// Used by the TestStdoutWithWrongPath*()
-func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func()) (*ArvTestClient, *ContainerRunner, error) {
+func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func() int) (*ArvTestClient, *ContainerRunner, error) {
err := json.Unmarshal([]byte(record), &s.api.Container)
c.Assert(err, IsNil)
s.executor.runFunc = fn
@@ -1552,7 +1830,7 @@ func (s *TestSuite) TestStdoutWithWrongPath(c *C) {
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path":"/tmpa.out"} },
"output_path": "/tmp",
"state": "Locked"
-}`, func() {})
+}`, func() int { return 0 })
c.Check(err, ErrorMatches, ".*Stdout path does not start with OutputPath.*")
}
@@ -1561,7 +1839,7 @@ func (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) {
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "tmp", "path":"/tmp/a.out"} },
"output_path": "/tmp",
"state": "Locked"
-}`, func() {})
+}`, func() int { return 0 })
c.Check(err, ErrorMatches, ".*unsupported mount kind 'tmp' for stdout.*")
}
@@ -1570,7 +1848,7 @@ func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "collection", "path":"/tmp/a.out"} },
"output_path": "/tmp",
"state": "Locked"
-}`, func() {})
+}`, func() int { return 0 })
c.Check(err, ErrorMatches, ".*unsupported mount kind 'collection' for stdout.*")
}
@@ -1585,9 +1863,9 @@ func (s *TestSuite) TestFullRunWithAPI(c *C) {
"priority": 1,
"runtime_constraints": {"API": true},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
c.Check(s.executor.created.Env["ARVADOS_API_HOST"], Equals, os.Getenv("ARVADOS_API_HOST"))
- s.executor.exit <- 3
+ return 3
})
c.Check(s.api.CalledWith("container.exit_code", 3), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
@@ -1607,8 +1885,9 @@ func (s *TestSuite) TestFullRunSetOutput(c *C) {
"priority": 1,
"runtime_constraints": {"API": true},
"state": "Locked"
-}`, nil, 0, func() {
+}`, nil, func() int {
s.api.Container.Output = arvadostest.DockerImage112PDH
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -1622,9 +1901,9 @@ func (s *TestSuite) TestArvMountRuntimeStatusWarning(c *C) {
ioutil.WriteFile(s.runner.ArvMountPoint+"/by_id/README", nil, 0666)
return s.runner.ArvMountCmd([]string{"bash", "-c", "echo >&2 Test: Keep write error: I am a teapot; sleep 3"}, "")
}
- s.executor.runFunc = func() {
+ s.executor.runFunc = func() int {
time.Sleep(time.Second)
- s.executor.exit <- 137
+ return 137
}
record := `{
"command": ["sleep", "1"],
@@ -1670,8 +1949,9 @@ func (s *TestSuite) TestStdoutWithExcludeFromOutputMountPointUnderOutputDir(c *C
extraMounts := []string{"a3e8f74c6f101eae01fa08bfb4e49b3a+54"}
- s.fullRunHelper(c, helperRecord, extraMounts, 0, func() {
+ s.fullRunHelper(c, helperRecord, extraMounts, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -1696,7 +1976,8 @@ func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
"output_path": "/tmp",
"priority": 1,
"runtime_constraints": {},
- "state": "Locked"
+ "state": "Locked",
+ "uuid": "zzzzz-dz642-202301130848001"
}`
extraMounts := []string{
@@ -1705,8 +1986,9 @@ func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
"a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt",
}
- api, _, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, 0, func() {
+ api, _, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(s.executor.created.BindMounts, DeepEquals, map[string]bindmount{
@@ -1719,22 +2001,25 @@ func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
c.Check(api.CalledWith("container.exit_code", 0), NotNil)
c.Check(api.CalledWith("container.state", "Complete"), NotNil)
- for _, v := range api.Content {
- if v["collection"] != nil {
- c.Check(v["ensure_unique_name"], Equals, true)
- collection := v["collection"].(arvadosclient.Dict)
- if strings.Index(collection["name"].(string), "output") == 0 {
- manifest := collection["manifest_text"].(string)
-
- c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+ output_count := uint(0)
+ for _, v := range s.runner.ContainerArvClient.(*ArvTestClient).Content {
+ if v["collection"] == nil {
+ continue
+ }
+ collection := v["collection"].(arvadosclient.Dict)
+ if collection["name"].(string) != "output for zzzzz-dz642-202301130848001" {
+ continue
+ }
+ c.Check(v["ensure_unique_name"], Equals, true)
+ c.Check(collection["manifest_text"].(string), Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:bar 36:18:sub1file2
./foo/baz 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 9:18:sub2file2
./foo/sub1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
./foo/sub1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
`)
- }
- }
+ output_count++
}
+ c.Check(output_count, Not(Equals), uint(0))
}
func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(c *C) {
@@ -1751,31 +2036,36 @@ func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(
"output_path": "/tmp",
"priority": 1,
"runtime_constraints": {},
- "state": "Locked"
+ "state": "Locked",
+ "uuid": "zzzzz-dz642-202301130848002"
}`
extraMounts := []string{
"b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
}
- s.fullRunHelper(c, helperRecord, extraMounts, 0, func() {
+ s.fullRunHelper(c, helperRecord, extraMounts, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
- for _, v := range s.api.Content {
- if v["collection"] != nil {
- collection := v["collection"].(arvadosclient.Dict)
- if strings.Index(collection["name"].(string), "output") == 0 {
- manifest := collection["manifest_text"].(string)
-
- c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+ output_count := uint(0)
+ for _, v := range s.runner.ContainerArvClient.(*ArvTestClient).Content {
+ if v["collection"] == nil {
+ continue
+ }
+ collection := v["collection"].(arvadosclient.Dict)
+ if collection["name"].(string) != "output for zzzzz-dz642-202301130848002" {
+ continue
+ }
+ c.Check(collection["manifest_text"].(string), Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 10:17:bar
`)
- }
- }
+ output_count++
}
+ c.Check(output_count, Not(Equals), uint(0))
}
func (s *TestSuite) TestOutputError(c *C) {
@@ -1792,8 +2082,9 @@ func (s *TestSuite) TestOutputError(c *C) {
"runtime_constraints": {},
"state": "Locked"
}`
- s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ s.fullRunHelper(c, helperRecord, nil, func() int {
os.Symlink("/etc/hosts", s.runner.HostOutputDir+"/baz")
+ return 0
})
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
@@ -1820,8 +2111,9 @@ func (s *TestSuite) TestStdinCollectionMountPoint(c *C) {
"b0def87f80dd594d4675809e83bd4f15+367/file1_in_main.txt",
}
- api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func() {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(api.CalledWith("container.exit_code", 0), NotNil)
@@ -1855,8 +2147,9 @@ func (s *TestSuite) TestStdinJsonMountPoint(c *C) {
"state": "Locked"
}`
- api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ api, _, _ := s.fullRunHelper(c, helperRecord, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env["FROBIZ"])
+ return 0
})
c.Check(api.CalledWith("container.exit_code", 0), NotNil)
@@ -1886,9 +2179,10 @@ func (s *TestSuite) TestStderrMount(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 1, func() {
+}`, nil, func() int {
fmt.Fprintln(s.executor.created.Stdout, "hello")
fmt.Fprintln(s.executor.created.Stderr, "oops")
+ return 1
})
final := api.CalledWith("container.state", "Complete")
@@ -1938,7 +2232,7 @@ func (s *TestSuite) TestFullBrokenDocker(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 0, func() {})
+}`, nil, func() int { return 0 })
c.Check(s.api.CalledWith("container.state", nextState), NotNil)
c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
if s.runner.brokenNodeHook != "" {
@@ -1969,7 +2263,7 @@ func (s *TestSuite) TestBadCommand(c *C) {
"priority": 1,
"runtime_constraints": {},
"state": "Locked"
-}`, nil, 0, func() {})
+}`, nil, func() int { return 0 })
c.Check(s.api.CalledWith("container.state", "Cancelled"), NotNil)
c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
}
@@ -1992,10 +2286,11 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
"state": "Locked"
}`
- s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ s.fullRunHelper(c, helperRecord, nil, func() int {
content, err := ioutil.ReadFile(s.runner.HostOutputDir + "/secret.conf")
c.Check(err, IsNil)
c.Check(string(content), Equals, "mypassword")
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -2021,10 +2316,11 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
}`
s.SetUpTest(c)
- s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ s.fullRunHelper(c, helperRecord, nil, func() int {
content, err := ioutil.ReadFile(s.runner.HostOutputDir + "/secret.conf")
c.Check(err, IsNil)
c.Check(string(content), Equals, "mypassword")
+ return 0
})
c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
@@ -2050,7 +2346,7 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
}`
s.SetUpTest(c)
- _, _, realtemp := s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ _, _, realtemp := s.fullRunHelper(c, helperRecord, nil, func() int {
// secret.conf should be provisioned as a separate
// bind mount, i.e., it should not appear in the
// (fake) fuse filesystem as viewed from the host.
@@ -2060,6 +2356,7 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
}
err = ioutil.WriteFile(s.runner.HostOutputDir+"/.arvados#collection", []byte(`{"manifest_text":". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"}`), 0700)
c.Check(err, IsNil)
+ return 0
})
content, err := ioutil.ReadFile(realtemp + "/text1/mountdata.text")
@@ -2071,6 +2368,134 @@ func (s *TestSuite) TestSecretTextMountPoint(c *C) {
c.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"), NotNil)
}
+func (s *TestSuite) TestCalculateCost(c *C) {
+ defer func(s string) { lockdir = s }(lockdir)
+ lockdir = c.MkDir()
+ now := time.Now()
+ cr := s.runner
+ cr.costStartTime = now.Add(-time.Hour)
+ var logbuf bytes.Buffer
+ cr.CrunchLog.Immediate = log.New(&logbuf, "", 0)
+
+ // if there's no InstanceType env var, cost is calculated as 0
+ os.Unsetenv("InstanceType")
+ cost := cr.calculateCost(now)
+ c.Check(cost, Equals, 0.0)
+
+ // with InstanceType env var and loadPrices() hasn't run (or
+ // hasn't found any data), cost is calculated based on
+ // InstanceType env var
+ os.Setenv("InstanceType", `{"Price":1.2}`)
+ defer os.Unsetenv("InstanceType")
+ cost = cr.calculateCost(now)
+ c.Check(cost, Equals, 1.2)
+
+ // first update tells us the spot price was $1/h until 30
+ // minutes ago when it increased to $2/h
+ j, err := json.Marshal([]cloud.InstancePrice{
+ {StartTime: now.Add(-4 * time.Hour), Price: 1.0},
+ {StartTime: now.Add(-time.Hour / 2), Price: 2.0},
+ })
+ c.Assert(err, IsNil)
+ os.WriteFile(lockdir+"/"+pricesfile, j, 0777)
+ cr.loadPrices()
+ cost = cr.calculateCost(now)
+ c.Check(cost, Equals, 1.5)
+
+ // next update (via --list + SIGUSR2) tells us the spot price
+ // increased to $3/h 15 minutes ago
+ j, err = json.Marshal([]cloud.InstancePrice{
+ {StartTime: now.Add(-time.Hour / 3), Price: 2.0}, // dup of -time.Hour/2 price
+ {StartTime: now.Add(-time.Hour / 4), Price: 3.0},
+ })
+ c.Assert(err, IsNil)
+ os.WriteFile(lockdir+"/"+pricesfile, j, 0777)
+ cr.loadPrices()
+ cost = cr.calculateCost(now)
+ c.Check(cost, Equals, 1.0/2+2.0/4+3.0/4)
+
+ cost = cr.calculateCost(now.Add(-time.Hour / 2))
+ c.Check(cost, Equals, 0.5)
+
+ c.Logf("%s", logbuf.String())
+ c.Check(logbuf.String(), Matches, `(?ms).*Instance price changed to 1\.00 at 20.* changed to 2\.00 .* changed to 3\.00 .*`)
+ c.Check(logbuf.String(), Not(Matches), `(?ms).*changed to 2\.00 .* changed to 2\.00 .*`)
+}
+
+func (s *TestSuite) TestSIGUSR2CostUpdate(c *C) {
+ pid := os.Getpid()
+ now := time.Now()
+ pricesJSON, err := json.Marshal([]cloud.InstancePrice{
+ {StartTime: now.Add(-4 * time.Hour), Price: 2.4},
+ {StartTime: now.Add(-2 * time.Hour), Price: 2.6},
+ })
+ c.Assert(err, IsNil)
+
+ os.Setenv("InstanceType", `{"Price":2.2}`)
+ defer os.Unsetenv("InstanceType")
+ defer func(s string) { lockdir = s }(lockdir)
+ lockdir = c.MkDir()
+
+ // We can't use s.api.CalledWith because timing differences will yield
+ // different cost values across runs. getCostUpdate iterates over API
+ // calls until it finds one that sets the cost, then writes that value
+ // to the next index of costUpdates.
+ deadline := now.Add(time.Second)
+ costUpdates := make([]float64, 2)
+ costIndex := 0
+ apiIndex := 0
+ getCostUpdate := func() {
+ for ; time.Now().Before(deadline); time.Sleep(time.Second / 10) {
+ for apiIndex < len(s.api.Content) {
+ update := s.api.Content[apiIndex]
+ apiIndex++
+ var ok bool
+ var cost float64
+ if update, ok = update["container"].(arvadosclient.Dict); !ok {
+ continue
+ }
+ if cost, ok = update["cost"].(float64); !ok {
+ continue
+ }
+ c.Logf("API call #%d updates cost to %v", apiIndex-1, cost)
+ costUpdates[costIndex] = cost
+ costIndex++
+ return
+ }
+ }
+ }
+
+ s.fullRunHelper(c, `{
+ "command": ["true"],
+ "container_image": "`+arvadostest.DockerImage112PDH+`",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {},
+ "state": "Locked",
+ "uuid": "zzzzz-dz642-20230320101530a"
+ }`, nil, func() int {
+ s.runner.costStartTime = now.Add(-3 * time.Hour)
+ err := syscall.Kill(pid, syscall.SIGUSR2)
+ c.Check(err, IsNil, Commentf("error sending first SIGUSR2 to runner"))
+ getCostUpdate()
+
+ err = os.WriteFile(path.Join(lockdir, pricesfile), pricesJSON, 0o700)
+ c.Check(err, IsNil, Commentf("error writing JSON prices file"))
+ err = syscall.Kill(pid, syscall.SIGUSR2)
+ c.Check(err, IsNil, Commentf("error sending second SIGUSR2 to runner"))
+ getCostUpdate()
+
+ return 0
+ })
+ // Comparing with format strings makes it easy to ignore minor variations
+ // in cost across runs while keeping diagnostics pretty.
+ c.Check(fmt.Sprintf("%.3f", costUpdates[0]), Equals, "6.600")
+ c.Check(fmt.Sprintf("%.3f", costUpdates[1]), Equals, "7.600")
+}
+
type FakeProcess struct {
cmdLine []string
}
diff --git a/lib/crunchrun/docker.go b/lib/crunchrun/docker.go
index 54d0e680fe..4f449133f3 100644
--- a/lib/crunchrun/docker.go
+++ b/lib/crunchrun/docker.go
@@ -4,6 +4,7 @@
package crunchrun
import (
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -17,7 +18,6 @@ import (
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerclient "github.com/docker/docker/client"
- "golang.org/x/net/context"
)
// Docker daemon won't let you set a limit less than ~10 MiB
@@ -31,6 +31,11 @@ const minDockerRAM = int64(16 * 1024 * 1024)
// https://docs.docker.com/engine/api/.
const DockerAPIVersion = "1.35"
+// Number of consecutive "inspect container" failures before
+// concluding Docker is unresponsive, giving up, and cancelling the
+// container.
+const dockerWatchdogThreshold = 5
+
type dockerExecutor struct {
containerUUID string
logf func(string, ...interface{})
@@ -47,7 +52,7 @@ func newDockerExecutor(containerUUID string, logf func(string, ...interface{}),
// currently the minimum version we want to support.
client, err := dockerclient.NewClient(dockerclient.DefaultDockerHost, DockerAPIVersion, nil, nil)
if watchdogInterval < 1 {
- watchdogInterval = time.Minute
+ watchdogInterval = time.Minute * 2
}
return &dockerExecutor{
containerUUID: containerUUID,
@@ -182,7 +187,7 @@ func (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, doc
func (e *dockerExecutor) Create(spec containerSpec) error {
cfg, hostCfg := e.config(spec)
- created, err := e.dockerclient.ContainerCreate(context.TODO(), &cfg, &hostCfg, nil, e.containerUUID)
+ created, err := e.dockerclient.ContainerCreate(context.TODO(), &cfg, &hostCfg, nil, nil, e.containerUUID)
if err != nil {
return fmt.Errorf("While creating container: %v", err)
}
@@ -190,8 +195,15 @@ func (e *dockerExecutor) Create(spec containerSpec) error {
return e.startIO(spec.Stdin, spec.Stdout, spec.Stderr)
}
-func (e *dockerExecutor) CgroupID() string {
- return e.containerID
+func (e *dockerExecutor) Pid() int {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
+ defer cancel()
+ ctr, err := e.dockerclient.ContainerInspect(ctx, e.containerID)
+ if err == nil && ctr.State != nil {
+ return ctr.State.Pid
+ } else {
+ return 0
+ }
}
func (e *dockerExecutor) Start() error {
@@ -225,17 +237,17 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
// kill it.
return
} else if err != nil {
- e.logf("Error inspecting container: %s", err)
- watchdogErr <- err
- return
+ watchdogErr <- fmt.Errorf("error inspecting container: %s", err)
} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
- watchdogErr <- fmt.Errorf("Container is not running: State=%v", ctr.State)
- return
+ watchdogErr <- fmt.Errorf("container is not running: State=%v", ctr.State)
+ } else {
+ watchdogErr <- nil
}
}
}()
waitOk, waitErr := e.dockerclient.ContainerWait(ctx, e.containerID, dockercontainer.WaitConditionNotRunning)
+ errors := 0
for {
select {
case waitBody := <-waitOk:
@@ -250,7 +262,16 @@ func (e *dockerExecutor) Wait(ctx context.Context) (int, error) {
return -1, ctx.Err()
case err := <-watchdogErr:
- return -1, err
+ if err == nil {
+ errors = 0
+ } else {
+ e.logf("docker watchdog: %s", err)
+ errors++
+ if errors >= dockerWatchdogThreshold {
+ e.logf("docker watchdog: giving up")
+ return -1, err
+ }
+ }
}
}
}
diff --git a/lib/crunchrun/executor.go b/lib/crunchrun/executor.go
index 1ed460acd9..308b05cdeb 100644
--- a/lib/crunchrun/executor.go
+++ b/lib/crunchrun/executor.go
@@ -4,10 +4,10 @@
package crunchrun
import (
+ "context"
"io"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "golang.org/x/net/context"
)
type bindmount struct {
@@ -51,8 +51,9 @@ type containerExecutor interface {
// Start the container
Start() error
- // CID the container will belong to
- CgroupID() string
+ // Process ID of a process in the container. Return 0 if
+ // container is finished or no process has started yet.
+ Pid() int
// Stop the container immediately
Stop() error
diff --git a/lib/crunchrun/executor_test.go b/lib/crunchrun/executor_test.go
index e757f579fe..3a91c78641 100644
--- a/lib/crunchrun/executor_test.go
+++ b/lib/crunchrun/executor_test.go
@@ -6,6 +6,7 @@ package crunchrun
import (
"bytes"
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -18,7 +19,6 @@ import (
"git.arvados.org/arvados.git/lib/diagnostics"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "golang.org/x/net/context"
. "gopkg.in/check.v1"
)
@@ -134,6 +134,10 @@ func (s *executorSuite) TestExecCleanEnv(c *C) {
// singularity also sets this by itself (v3.5.2, but not v3.7.4)
case "PROMPT_COMMAND", "PS1", "SINGULARITY_BIND", "SINGULARITY_COMMAND", "SINGULARITY_ENVIRONMENT":
// singularity also sets these by itself (v3.7.4)
+ case "SINGULARITY_NO_EVAL":
+ // our singularity driver sets this to control
+ // singularity behavior, and it gets passed
+ // through to the container
default:
got[kv[0]] = kv[1]
}
diff --git a/lib/crunchrun/git_mount.go b/lib/crunchrun/git_mount.go
index 92bb6d11d9..561ea18de4 100644
--- a/lib/crunchrun/git_mount.go
+++ b/lib/crunchrun/git_mount.go
@@ -48,25 +48,22 @@ func (gm gitMount) validate() error {
// ExtractTree extracts the specified tree into dir, which is an
// existing empty local directory.
-func (gm gitMount) extractTree(ac IArvadosClient, dir string, token string) error {
+func (gm gitMount) extractTree(ac *arvados.Client, dir string, token string) error {
err := gm.validate()
if err != nil {
return err
}
- baseURL, err := ac.Discovery("gitUrl")
+ dd, err := ac.DiscoveryDocument()
if err != nil {
- return fmt.Errorf("discover gitUrl from API: %s", err)
- } else if _, ok := baseURL.(string); !ok {
- return fmt.Errorf("discover gitUrl from API: expected string, found %T", baseURL)
+ return fmt.Errorf("error getting discovery document: %w", err)
}
-
- u, err := url.Parse(baseURL.(string))
+ u, err := url.Parse(dd.GitURL)
if err != nil {
- return fmt.Errorf("parse gitUrl %q: %s", baseURL, err)
+ return fmt.Errorf("parse gitUrl %q: %s", dd.GitURL, err)
}
u, err = u.Parse("/" + gm.UUID + ".git")
if err != nil {
- return fmt.Errorf("build git url from %q, %q: %s", baseURL, gm.UUID, err)
+ return fmt.Errorf("build git url from %q, %q: %s", dd.GitURL, gm.UUID, err)
}
store := memory.NewStorage()
repo, err := git.Init(store, osfs.New(dir))
diff --git a/lib/crunchrun/git_mount_test.go b/lib/crunchrun/git_mount_test.go
index e39beaa943..ac98dcc480 100644
--- a/lib/crunchrun/git_mount_test.go
+++ b/lib/crunchrun/git_mount_test.go
@@ -6,14 +6,11 @@ package crunchrun
import (
"io/ioutil"
- "net/url"
"os"
"path/filepath"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
@@ -26,11 +23,10 @@ type GitMountSuite struct {
var _ = check.Suite(&GitMountSuite{})
func (s *GitMountSuite) SetUpTest(c *check.C) {
- s.useTestGitServer(c)
-
var err error
s.tmpdir, err = ioutil.TempDir("", "")
c.Assert(err, check.IsNil)
+ git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
}
func (s *GitMountSuite) TearDownTest(c *check.C) {
@@ -39,13 +35,14 @@ func (s *GitMountSuite) TearDownTest(c *check.C) {
}
// Commit fd3531f is crunch-run-tree-test
-func (s *GitMountSuite) TestextractTree(c *check.C) {
+func (s *GitMountSuite) TestExtractTree(c *check.C) {
gm := gitMount{
Path: "/",
UUID: arvadostest.Repository2UUID,
Commit: "fd3531f42995344f36c30b79f55f27b502f3d344",
}
- err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+ ac := arvados.NewClientFromEnv()
+ err := gm.extractTree(ac, s.tmpdir, arvadostest.ActiveToken)
c.Check(err, check.IsNil)
fnm := filepath.Join(s.tmpdir, "dir1/dir2/file with mode 0644")
@@ -85,7 +82,7 @@ func (s *GitMountSuite) TestExtractNonTipCommit(c *check.C) {
UUID: arvadostest.Repository2UUID,
Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
}
- err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+ err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
c.Check(err, check.IsNil)
fnm := filepath.Join(s.tmpdir, "file only on testbranch")
@@ -100,7 +97,7 @@ func (s *GitMountSuite) TestNonexistentRepository(c *check.C) {
UUID: "zzzzz-s0uqq-nonexistentrepo",
Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
}
- err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+ err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
c.Check(err, check.NotNil)
c.Check(err, check.ErrorMatches, ".*repository not found.*")
@@ -113,7 +110,7 @@ func (s *GitMountSuite) TestNonexistentCommit(c *check.C) {
UUID: arvadostest.Repository2UUID,
Commit: "bb66b6bb6b6bbb6b6b6b66b6b6b6b6b6b6b6b66b",
}
- err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+ err := gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
c.Check(err, check.NotNil)
c.Check(err, check.ErrorMatches, ".*object not found.*")
@@ -127,8 +124,8 @@ func (s *GitMountSuite) TestGitUrlDiscoveryFails(c *check.C) {
UUID: arvadostest.Repository2UUID,
Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
}
- err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
- c.Check(err, check.ErrorMatches, ".*gitUrl.*")
+ err := gm.extractTree(&arvados.Client{}, s.tmpdir, arvadostest.ActiveToken)
+ c.Check(err, check.ErrorMatches, ".*error getting discovery doc.*")
}
func (s *GitMountSuite) TestInvalid(c *check.C) {
@@ -186,7 +183,7 @@ func (s *GitMountSuite) TestInvalid(c *check.C) {
matcher: ".*writable.*",
},
} {
- err := trial.gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+ err := trial.gm.extractTree(arvados.NewClientFromEnv(), s.tmpdir, arvadostest.ActiveToken)
c.Check(err, check.NotNil)
s.checkTmpdirContents(c, []string{})
@@ -202,15 +199,3 @@ func (s *GitMountSuite) checkTmpdirContents(c *check.C, expect []string) {
c.Check(err, check.IsNil)
c.Check(names, check.DeepEquals, expect)
}
-
-func (*GitMountSuite) useTestGitServer(c *check.C) {
- git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
-
- loader := config.NewLoader(nil, ctxlog.TestLogger(c))
- cfg, err := loader.Load()
- c.Assert(err, check.IsNil)
- cluster, err := cfg.GetCluster("")
- c.Assert(err, check.IsNil)
-
- discoveryMap["gitUrl"] = (*url.URL)(&cluster.Services.GitHTTP.ExternalURL).String()
-}
diff --git a/lib/crunchrun/integration_test.go b/lib/crunchrun/integration_test.go
index d569020824..ef5cc567db 100644
--- a/lib/crunchrun/integration_test.go
+++ b/lib/crunchrun/integration_test.go
@@ -20,7 +20,6 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
- "git.arvados.org/arvados.git/services/keepstore"
. "gopkg.in/check.v1"
)
@@ -195,7 +194,9 @@ func (s *integrationSuite) TestRunTrivialContainerWithLocalKeepstore(c *C) {
volume.Replication = 2
cluster.Volumes[uuid] = volume
- var v keepstore.UnixVolume
+ var v struct {
+ Root string
+ }
err = json.Unmarshal(volume.DriverParameters, &v)
c.Assert(err, IsNil)
err = os.Mkdir(v.Root, 0777)
@@ -220,6 +221,8 @@ func (s *integrationSuite) TestRunTrivialContainerWithLocalKeepstore(c *C) {
if trial.logConfig == "none" {
c.Check(logExists, Equals, false)
} else {
+ c.Check(log, Matches, `(?ms).*not running trash worker.*`)
+ c.Check(log, Matches, `(?ms).*not running trash emptier.*`)
c.Check(log, trial.matchGetReq, `(?ms).*"reqMethod":"GET".*`)
c.Check(log, trial.matchPutReq, `(?ms).*"reqMethod":"PUT".*,"reqPath":"0e3bcff26d51c895a60ea0d4585e134d".*`)
}
diff --git a/lib/crunchrun/logging.go b/lib/crunchrun/logging.go
index 76a55c4992..91a1b77cf4 100644
--- a/lib/crunchrun/logging.go
+++ b/lib/crunchrun/logging.go
@@ -175,9 +175,9 @@ func ReadWriteLines(in io.Reader, writer io.Writer, done chan<- bool) {
}
// NewThrottledLogger creates a new thottled logger that
-// (a) prepends timestamps to each line
-// (b) batches log messages and only calls the underlying Writer
-// at most once per "crunchLogSecondsBetweenEvents" seconds.
+// - prepends timestamps to each line, and
+// - batches log messages and only calls the underlying Writer
+// at most once per "crunchLogSecondsBetweenEvents" seconds.
func NewThrottledLogger(writer io.WriteCloser) *ThrottledLogger {
tl := &ThrottledLogger{}
tl.flush = make(chan struct{}, 1)
diff --git a/lib/crunchrun/logging_test.go b/lib/crunchrun/logging_test.go
index fdd4f27b7f..42f165fd75 100644
--- a/lib/crunchrun/logging_test.go
+++ b/lib/crunchrun/logging_test.go
@@ -191,6 +191,10 @@ func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytesPerEvent(c *C)
s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 50, 67108864, "Exceeded log limit 50 bytes (crunch_limit_log_bytes_per_job)")
}
+func (s *LoggingTestSuite) TestWriteLogsWithZeroBytesPerJob(c *C) {
+ s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 0, 67108864, "Exceeded log limit 0 bytes (crunch_limit_log_bytes_per_job)")
+}
+
func (s *LoggingTestSuite) testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, throttleDefault int, expected string) {
discoveryMap[throttleParam] = float64(throttleValue)
defer func() {
diff --git a/lib/crunchrun/singularity.go b/lib/crunchrun/singularity.go
index 1da401f859..fd26297713 100644
--- a/lib/crunchrun/singularity.go
+++ b/lib/crunchrun/singularity.go
@@ -6,6 +6,7 @@ package crunchrun
import (
"bytes"
+ "context"
"errors"
"fmt"
"io/ioutil"
@@ -21,7 +22,6 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "golang.org/x/net/context"
)
type singularityExecutor struct {
@@ -353,8 +353,9 @@ func (e *singularityExecutor) Start() error {
return nil
}
-func (e *singularityExecutor) CgroupID() string {
- return ""
+func (e *singularityExecutor) Pid() int {
+ // see https://dev.arvados.org/issues/17244#note-21
+ return 0
}
func (e *singularityExecutor) Stop() error {
diff --git a/apps/workbench/app/mailers/.gitkeep b/lib/crunchrun/testdata/fakestat/cgroup.procs
similarity index 100%
rename from apps/workbench/app/mailers/.gitkeep
rename to lib/crunchrun/testdata/fakestat/cgroup.procs
diff --git a/apps/workbench/app/models/.gitkeep b/lib/crunchrun/testdata/fakestat/cgroupid/cgroup.procs
similarity index 100%
rename from apps/workbench/app/models/.gitkeep
rename to lib/crunchrun/testdata/fakestat/cgroupid/cgroup.procs
diff --git a/lib/crunchrun/testdata/fakestat/cgroupid/memory.stat b/lib/crunchrun/testdata/fakestat/cgroupid/memory.stat
new file mode 100644
index 0000000000..22f0e13fa9
--- /dev/null
+++ b/lib/crunchrun/testdata/fakestat/cgroupid/memory.stat
@@ -0,0 +1,5 @@
+rss 734003200
+pgmajfault 3200
+total_cache 73400320
+total_pgmajfault 20
+total_swap 320
diff --git a/lib/crunchstat/command.go b/lib/crunchstat/command.go
new file mode 100644
index 0000000000..8c79c139b4
--- /dev/null
+++ b/lib/crunchstat/command.go
@@ -0,0 +1,106 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package crunchstat
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os/exec"
+ "syscall"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/cmd"
+)
+
+var Command = command{}
+
+type command struct{}
+
+func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ flags := flag.NewFlagSet(prog, flag.ExitOnError)
+ poll := flags.Duration("poll", 10*time.Second, "reporting interval")
+ debug := flags.Bool("debug", false, "show additional debug info")
+ dump := flags.String("dump", "", "save snapshot of OS files in given `directory` (for creating test cases)")
+ getVersion := flags.Bool("version", false, "print version information and exit")
+
+ if ok, code := cmd.ParseFlags(flags, prog, args, "program [args ...]", stderr); !ok {
+ return code
+ } else if *getVersion {
+ fmt.Printf("%s %s\n", prog, cmd.Version.String())
+ return 0
+ } else if flags.NArg() == 0 {
+ fmt.Fprintf(stderr, "missing required argument: program (try -help)\n")
+ return 2
+ }
+
+ reporter := &Reporter{
+ Logger: log.New(stderr, prog+": ", 0),
+ Debug: *debug,
+ PollPeriod: *poll,
+ }
+ reporter.Logger.Printf("%s %s", prog, cmd.Version.String())
+ reporter.Logger.Printf("running %v", flags.Args())
+ cmd := exec.Command(flags.Arg(0), flags.Args()[1:]...)
+
+ // Child process will use our stdin and stdout pipes (we close
+ // our copies below)
+ cmd.Stdin = stdin
+ cmd.Stdout = stdout
+ // Child process stderr and our stats will both go to stderr
+ cmd.Stderr = stderr
+
+ if err := cmd.Start(); err != nil {
+ reporter.Logger.Printf("error in cmd.Start: %v", err)
+ return 1
+ }
+ reporter.Pid = func() int {
+ return cmd.Process.Pid
+ }
+ reporter.Start()
+ defer reporter.Stop()
+ if stdin, ok := stdin.(io.Closer); ok {
+ stdin.Close()
+ }
+ if stdout, ok := stdout.(io.Closer); ok {
+ stdout.Close()
+ }
+
+ failed := false
+ if *dump != "" {
+ err := reporter.dumpSourceFiles(*dump)
+ if err != nil {
+ fmt.Fprintf(stderr, "error dumping source files: %s\n", err)
+ failed = true
+ }
+ }
+
+ err := cmd.Wait()
+
+ if err, ok := err.(*exec.ExitError); ok {
+ // The program has exited with an exit code != 0
+
+ // This works on both Unix and Windows. Although
+ // package syscall is generally platform dependent,
+ // WaitStatus is defined for both Unix and Windows and
+ // in both cases has an ExitStatus() method with the
+ // same signature.
+ if status, ok := err.Sys().(syscall.WaitStatus); ok {
+ return status.ExitStatus()
+ } else {
+ reporter.Logger.Printf("ExitError without WaitStatus: %v", err)
+ return 1
+ }
+ } else if err != nil {
+ reporter.Logger.Printf("error running command: %v", err)
+ return 1
+ }
+
+ if failed {
+ return 1
+ }
+ return 0
+}
diff --git a/lib/crunchstat/crunchstat.go b/lib/crunchstat/crunchstat.go
index 10cd7cfce4..bbd0a7fd2f 100644
--- a/lib/crunchstat/crunchstat.go
+++ b/lib/crunchstat/crunchstat.go
@@ -12,53 +12,129 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"io/ioutil"
- "log"
"os"
+ "path/filepath"
+ "regexp"
+ "sort"
"strconv"
"strings"
+ "sync"
"syscall"
"time"
)
+// crunchstat collects all memory statistics, but only reports these.
+var memoryStats = [...]string{"cache", "swap", "pgmajfault", "rss"}
+
+type logPrinter interface {
+ Printf(fmt string, args ...interface{})
+}
+
// A Reporter gathers statistics for a cgroup and writes them to a
// log.Logger.
type Reporter struct {
- // CID of the container to monitor. If empty, read the CID
- // from CIDFile (first waiting until a non-empty file appears
- // at CIDFile). If CIDFile is also empty, report host
- // statistics.
- CID string
-
- // Path to a file we can read CID from.
- CIDFile string
-
- // Where cgroup accounting files live on this system, e.g.,
- // "/sys/fs/cgroup".
- CgroupRoot string
-
- // Parent cgroup, e.g., "docker".
- CgroupParent string
+ // Func that returns the pid of a process inside the desired
+ // cgroup. Reporter will call Pid periodically until it
+ // returns a positive number, then start reporting stats for
+ // the cgroup that process belongs to.
+ //
+ // Pid is used when cgroups v2 is available. For cgroups v1,
+ // see below.
+ Pid func() int
// Interval between samples. Must be positive.
PollPeriod time.Duration
- // Temporary directory, will be monitored for available, used & total space.
+ // Temporary directory, will be monitored for available, used
+ // & total space.
TempDir string
// Where to write statistics. Must not be nil.
- Logger *log.Logger
+ Logger logPrinter
+
+ // When stats cross thresholds configured in the fields below,
+ // they are reported to this logger.
+ ThresholdLogger logPrinter
+
+ // MemThresholds maps memory stat names to slices of thresholds.
+ // When the corresponding stat exceeds a threshold, that will be logged.
+ MemThresholds map[string][]Threshold
+
+ // Filesystem to read /proc entries and cgroup stats from.
+ // Non-nil for testing, nil for real root filesystem.
+ FS fs.FS
+
+ // Enable debug messages.
+ Debug bool
+
+ // available cgroup hierarchies
+ statFiles struct {
+ cpuMax string // v2
+ cpusetCpus string // v1,v2 (via /proc/$PID/cpuset)
+ cpuacctStat string // v1 (via /proc/$PID/cgroup => cpuacct)
+ cpuStat string // v2
+ ioServiceBytes string // v1 (via /proc/$PID/cgroup => blkio)
+ ioStat string // v2
+ memoryStat string // v1 and v2 (but v2 is missing some entries)
+ memoryCurrent string // v2
+ memorySwapCurrent string // v2
+ netDev string // /proc/$PID/net/dev
+ }
- reportedStatFile map[string]string
+ kernelPageSize int64
lastNetSample map[string]ioSample
lastDiskIOSample map[string]ioSample
lastCPUSample cpuSample
lastDiskSpaceSample diskSpaceSample
+ lastMemSample memSample
+ maxDiskSpaceSample diskSpaceSample
+ maxMemSample map[memoryKey]int64
+
+ // process returned by Pid(), whose cgroup stats we are
+ // reporting
+ pid int
+
+ // individual processes whose memory size we are reporting
+ reportPIDs map[string]int
+ reportPIDsMu sync.Mutex
done chan struct{} // closed when we should stop reporting
+ ready chan struct{} // have pid and stat files
flushed chan struct{} // closed when we have made our last report
}
+type Threshold struct {
+ percentage int64
+ threshold int64
+ total int64
+}
+
+func NewThresholdFromPercentage(total int64, percentage int64) Threshold {
+ return Threshold{
+ percentage: percentage,
+ threshold: total * percentage / 100,
+ total: total,
+ }
+}
+
+func NewThresholdsFromPercentages(total int64, percentages []int64) (thresholds []Threshold) {
+ for _, percentage := range percentages {
+ thresholds = append(thresholds, NewThresholdFromPercentage(total, percentage))
+ }
+ return
+}
+
+// memoryKey is a key into Reporter.maxMemSample.
+// Initialize it with just statName to get the host/cgroup maximum.
+// Initialize it with all fields to get that process' maximum.
+type memoryKey struct {
+ processID int
+ processName string
+ statName string
+}
+
// Start starts monitoring in a new goroutine, and returns
// immediately.
//
@@ -72,104 +148,254 @@ type Reporter struct {
// Callers should not modify public data fields after calling Start.
func (r *Reporter) Start() {
r.done = make(chan struct{})
+ r.ready = make(chan struct{})
r.flushed = make(chan struct{})
+ if r.FS == nil {
+ r.FS = os.DirFS("/")
+ }
go r.run()
}
+// ReportPID starts reporting stats for a specified process.
+func (r *Reporter) ReportPID(name string, pid int) {
+ r.reportPIDsMu.Lock()
+ defer r.reportPIDsMu.Unlock()
+ if r.reportPIDs == nil {
+ r.reportPIDs = map[string]int{name: pid}
+ } else {
+ r.reportPIDs[name] = pid
+ }
+}
+
// Stop reporting. Do not call more than once, or before calling
// Start.
//
-// Nothing will be logged after Stop returns.
+// Nothing will be logged after Stop returns unless you call a Log* method.
func (r *Reporter) Stop() {
close(r.done)
<-r.flushed
}
-func (r *Reporter) readAllOrWarn(in io.Reader) ([]byte, error) {
- content, err := ioutil.ReadAll(in)
+var v1keys = map[string]bool{
+ "blkio": true,
+ "cpuacct": true,
+ "cpuset": true,
+ "memory": true,
+}
+
+// Find cgroup hierarchies in /proc/mounts, e.g.,
+//
+// {
+// "blkio": "/sys/fs/cgroup/blkio",
+// "unified": "/sys/fs/cgroup/unified",
+// }
+func (r *Reporter) cgroupMounts() map[string]string {
+ procmounts, err := fs.ReadFile(r.FS, "proc/mounts")
if err != nil {
- r.Logger.Printf("warning: %v", err)
+ r.Logger.Printf("error reading /proc/mounts: %s", err)
+ return nil
}
- return content, err
+ mounts := map[string]string{}
+ for _, line := range bytes.Split(procmounts, []byte{'\n'}) {
+ fields := bytes.SplitN(line, []byte{' '}, 6)
+ if len(fields) != 6 {
+ continue
+ }
+ switch string(fields[2]) {
+ case "cgroup2":
+ // cgroup /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime 0 0
+ mounts["unified"] = string(fields[1])
+ case "cgroup":
+ // cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
+ options := bytes.Split(fields[3], []byte{','})
+ for _, option := range options {
+ option := string(option)
+ if v1keys[option] {
+ mounts[option] = string(fields[1])
+ break
+ }
+ }
+ }
+ }
+ return mounts
}
-// Open the cgroup stats file in /sys/fs corresponding to the target
-// cgroup, and return an io.ReadCloser. If no stats file is available,
-// return nil.
+// generate map of cgroup controller => path for r.pid.
//
-// Log the file that was opened, if it isn't the same file opened on
-// the last openStatFile for this stat.
-//
-// Log "not available" if no file is found and either this stat has
-// been available in the past, or verbose==true.
-//
-// TODO: Instead of trying all options, choose a process in the
-// container, and read /proc/PID/cgroup to determine the appropriate
-// cgroup root for the given statgroup. (This will avoid falling back
-// to host-level stats during container setup and teardown.)
-func (r *Reporter) openStatFile(statgroup, stat string, verbose bool) (io.ReadCloser, error) {
- var paths []string
- if r.CID != "" {
- // Collect container's stats
- paths = []string{
- fmt.Sprintf("%s/%s/%s/%s/%s", r.CgroupRoot, statgroup, r.CgroupParent, r.CID, stat),
- fmt.Sprintf("%s/%s/%s/%s", r.CgroupRoot, r.CgroupParent, r.CID, stat),
+// the "unified" controller represents cgroups v2.
+func (r *Reporter) cgroupPaths(mounts map[string]string) map[string]string {
+ if len(mounts) == 0 {
+ return nil
+ }
+ procdir := fmt.Sprintf("proc/%d", r.pid)
+ buf, err := fs.ReadFile(r.FS, procdir+"/cgroup")
+ if err != nil {
+ r.Logger.Printf("error reading cgroup file: %s", err)
+ return nil
+ }
+ paths := map[string]string{}
+ for _, line := range bytes.Split(buf, []byte{'\n'}) {
+ // The entry for cgroup v2 is always in the format
+ // "0::$PATH" --
+ // https://docs.kernel.org/admin-guide/cgroup-v2.html
+ if bytes.HasPrefix(line, []byte("0::/")) && mounts["unified"] != "" {
+ paths["unified"] = mounts["unified"] + string(line[3:])
+ continue
}
- } else {
- // Collect this host's stats
- paths = []string{
- fmt.Sprintf("%s/%s/%s", r.CgroupRoot, statgroup, stat),
- fmt.Sprintf("%s/%s", r.CgroupRoot, stat),
- }
- }
- var path string
- var file *os.File
- var err error
- for _, path = range paths {
- file, err = os.Open(path)
- if err == nil {
+ // cgroups v1 entries look like
+ // "6:cpu,cpuacct:/user.slice"
+ fields := bytes.SplitN(line, []byte{':'}, 3)
+ if len(fields) != 3 {
+ continue
+ }
+ for _, key := range bytes.Split(fields[1], []byte{','}) {
+ key := string(key)
+ if mounts[key] != "" {
+ paths[key] = mounts[key] + string(fields[2])
+ }
+ }
+ }
+ // In unified mode, /proc/$PID/cgroup doesn't have a cpuset
+ // entry, but we still need it -- there's no cpuset.cpus file
+ // in the cgroup2 subtree indicated by the 0::$PATH entry. We
+ // have to get the right path from /proc/$PID/cpuset.
+ if _, found := paths["cpuset"]; !found && mounts["unified"] != "" {
+ buf, _ := fs.ReadFile(r.FS, procdir+"/cpuset")
+ cpusetPath := string(bytes.TrimRight(buf, "\n"))
+ paths["cpuset"] = mounts["unified"] + cpusetPath
+ }
+ return paths
+}
+
+func (r *Reporter) findStatFiles() {
+ mounts := r.cgroupMounts()
+ paths := r.cgroupPaths(mounts)
+ done := map[*string]bool{}
+ for _, try := range []struct {
+ statFile *string
+ pathkey string
+ file string
+ }{
+ {&r.statFiles.cpuMax, "unified", "cpu.max"},
+ {&r.statFiles.cpusetCpus, "cpuset", "cpuset.cpus.effective"},
+ {&r.statFiles.cpusetCpus, "cpuset", "cpuset.cpus"},
+ {&r.statFiles.cpuacctStat, "cpuacct", "cpuacct.stat"},
+ {&r.statFiles.cpuStat, "unified", "cpu.stat"},
+ // blkio.throttle.io_service_bytes must precede
+ // blkio.io_service_bytes -- on ubuntu1804, the latter
+ // is present but reports 0
+ {&r.statFiles.ioServiceBytes, "blkio", "blkio.throttle.io_service_bytes"},
+ {&r.statFiles.ioServiceBytes, "blkio", "blkio.io_service_bytes"},
+ {&r.statFiles.ioStat, "unified", "io.stat"},
+ {&r.statFiles.memoryStat, "unified", "memory.stat"},
+ {&r.statFiles.memoryStat, "memory", "memory.stat"},
+ {&r.statFiles.memoryCurrent, "unified", "memory.current"},
+ {&r.statFiles.memorySwapCurrent, "unified", "memory.swap.current"},
+ } {
+ startpath, ok := paths[try.pathkey]
+ if !ok || done[try.statFile] {
+ continue
+ }
+ // /proc/$PID/cgroup says cgroup path is
+ // /exa/mple/exa/mple, however, sometimes the file we
+ // need is not under that path, it's only available in
+ // a parent cgroup's dir. So we start at
+ // /sys/fs/cgroup/unified/exa/mple/exa/mple/ and walk
+ // up to /sys/fs/cgroup/unified/ until we find the
+ // desired file.
+ //
+ // This might mean our reported stats include more
+ // cgroups in the cgroup tree, but it's the best we
+ // can do.
+ for path := startpath; path != "" && path != "/" && (path == startpath || strings.HasPrefix(path, mounts[try.pathkey])); path, _ = filepath.Split(strings.TrimRight(path, "/")) {
+ target := strings.TrimLeft(filepath.Join(path, try.file), "/")
+ buf, err := fs.ReadFile(r.FS, target)
+ if err != nil || len(buf) == 0 || bytes.Equal(buf, []byte{'\n'}) {
+ if r.Debug {
+ if os.IsNotExist(err) {
+ // don't stutter
+ err = os.ErrNotExist
+ }
+ r.Logger.Printf("skip /%s: %s", target, err)
+ }
+ continue
+ }
+ *try.statFile = target
+ done[try.statFile] = true
+ r.Logger.Printf("notice: reading stats from /%s", target)
break
- } else {
- path = ""
}
}
- if pathWas := r.reportedStatFile[stat]; pathWas != path {
- // Log whenever we start using a new/different cgroup
- // stat file for a given statistic. This typically
- // happens 1 to 3 times per statistic, depending on
- // whether we happen to collect stats [a] before any
- // processes have been created in the container and
- // [b] after all contained processes have exited.
- if path == "" && verbose {
- r.Logger.Printf("notice: stats not available: stat %s, statgroup %s, cid %s, parent %s, root %s\n", stat, statgroup, r.CID, r.CgroupParent, r.CgroupRoot)
- } else if pathWas != "" {
- r.Logger.Printf("notice: stats moved from %s to %s\n", r.reportedStatFile[stat], path)
- } else {
- r.Logger.Printf("notice: reading stats from %s\n", path)
+
+ netdev := fmt.Sprintf("proc/%d/net/dev", r.pid)
+ if buf, err := fs.ReadFile(r.FS, netdev); err == nil && len(buf) > 0 {
+ r.statFiles.netDev = netdev
+ r.Logger.Printf("using /%s", netdev)
+ }
+}
+
+func (r *Reporter) reportMemoryMax(logger logPrinter, source, statName string, value, limit int64) {
+ var units string
+ switch statName {
+ case "pgmajfault":
+ units = "faults"
+ default:
+ units = "bytes"
+ }
+ if limit > 0 {
+ percentage := 100 * value / limit
+ logger.Printf("Maximum %s memory %s usage was %d%%, %d/%d %s",
+ source, statName, percentage, value, limit, units)
+ } else {
+ logger.Printf("Maximum %s memory %s usage was %d %s",
+ source, statName, value, units)
+ }
+}
+
+func (r *Reporter) LogMaxima(logger logPrinter, memLimits map[string]int64) {
+ if r.lastCPUSample.hasData {
+ logger.Printf("Total CPU usage was %f user and %f sys on %.2f CPUs",
+ r.lastCPUSample.user, r.lastCPUSample.sys, r.lastCPUSample.cpus)
+ }
+ for disk, sample := range r.lastDiskIOSample {
+ logger.Printf("Total disk I/O on %s was %d bytes written and %d bytes read",
+ disk, sample.txBytes, sample.rxBytes)
+ }
+ if r.maxDiskSpaceSample.total > 0 {
+ percentage := 100 * r.maxDiskSpaceSample.used / r.maxDiskSpaceSample.total
+ logger.Printf("Maximum disk usage was %d%%, %d/%d bytes",
+ percentage, r.maxDiskSpaceSample.used, r.maxDiskSpaceSample.total)
+ }
+ for _, statName := range memoryStats {
+ value, ok := r.maxMemSample[memoryKey{statName: "total_" + statName}]
+ if !ok {
+ value, ok = r.maxMemSample[memoryKey{statName: statName}]
+ }
+ if ok {
+ r.reportMemoryMax(logger, "container", statName, value, memLimits[statName])
}
- r.reportedStatFile[stat] = path
}
- return file, err
+ for ifname, sample := range r.lastNetSample {
+ logger.Printf("Total network I/O on %s was %d bytes written and %d bytes read",
+ ifname, sample.txBytes, sample.rxBytes)
+ }
}
-func (r *Reporter) getContainerNetStats() (io.Reader, error) {
- procsFile, err := r.openStatFile("cpuacct", "cgroup.procs", true)
- if err != nil {
- return nil, err
- }
- defer procsFile.Close()
- reader := bufio.NewScanner(procsFile)
- for reader.Scan() {
- taskPid := reader.Text()
- statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
- stats, err := ioutil.ReadFile(statsFilename)
- if err != nil {
- r.Logger.Printf("notice: %v", err)
+func (r *Reporter) LogProcessMemMax(logger logPrinter) {
+ for memKey, value := range r.maxMemSample {
+ if memKey.processName == "" {
continue
}
- return strings.NewReader(string(stats)), nil
+ r.reportMemoryMax(logger, memKey.processName, memKey.statName, value, 0)
+ }
+}
+
+func (r *Reporter) readAllOrWarn(in io.Reader) ([]byte, error) {
+ content, err := ioutil.ReadAll(in)
+ if err != nil {
+ r.Logger.Printf("warning: %v", err)
}
- return nil, errors.New("Could not read stats for any proc in container")
+ return content, err
}
type ioSample struct {
@@ -179,33 +405,58 @@ type ioSample struct {
}
func (r *Reporter) doBlkIOStats() {
- c, err := r.openStatFile("blkio", "blkio.io_service_bytes", true)
- if err != nil {
- return
- }
- defer c.Close()
- b := bufio.NewScanner(c)
var sampleTime = time.Now()
newSamples := make(map[string]ioSample)
- for b.Scan() {
- var device, op string
- var val int64
- if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
- continue
+
+ if r.statFiles.ioStat != "" {
+ statfile, err := fs.ReadFile(r.FS, r.statFiles.ioStat)
+ if err != nil {
+ return
}
- var thisSample ioSample
- var ok bool
- if thisSample, ok = newSamples[device]; !ok {
- thisSample = ioSample{sampleTime, -1, -1}
+ for _, line := range bytes.Split(statfile, []byte{'\n'}) {
+ // 254:16 rbytes=72163328 wbytes=117370880 rios=3811 wios=3906 dbytes=0 dios=0
+ words := bytes.Split(line, []byte{' '})
+ if len(words) < 2 {
+ continue
+ }
+ thisSample := ioSample{sampleTime, -1, -1}
+ for _, kv := range words[1:] {
+ if bytes.HasPrefix(kv, []byte("rbytes=")) {
+ fmt.Sscanf(string(kv[7:]), "%d", &thisSample.rxBytes)
+ } else if bytes.HasPrefix(kv, []byte("wbytes=")) {
+ fmt.Sscanf(string(kv[7:]), "%d", &thisSample.txBytes)
+ }
+ }
+ if thisSample.rxBytes >= 0 && thisSample.txBytes >= 0 {
+ newSamples[string(words[0])] = thisSample
+ }
}
- switch op {
- case "Read":
- thisSample.rxBytes = val
- case "Write":
- thisSample.txBytes = val
+ } else if r.statFiles.ioServiceBytes != "" {
+ statfile, err := fs.ReadFile(r.FS, r.statFiles.ioServiceBytes)
+ if err != nil {
+ return
+ }
+ for _, line := range bytes.Split(statfile, []byte{'\n'}) {
+ var device, op string
+ var val int64
+ if _, err := fmt.Sscanf(string(line), "%s %s %d", &device, &op, &val); err != nil {
+ continue
+ }
+ var thisSample ioSample
+ var ok bool
+ if thisSample, ok = newSamples[device]; !ok {
+ thisSample = ioSample{sampleTime, -1, -1}
+ }
+ switch op {
+ case "Read":
+ thisSample.rxBytes = val
+ case "Write":
+ thisSample.txBytes = val
+ }
+ newSamples[device] = thisSample
}
- newSamples[device] = thisSample
}
+
for dev, sample := range newSamples {
if sample.txBytes < 0 || sample.rxBytes < 0 {
continue
@@ -227,15 +478,17 @@ type memSample struct {
memStat map[string]int64
}
-func (r *Reporter) doMemoryStats() {
- c, err := r.openStatFile("memory", "memory.stat", true)
+func (r *Reporter) getMemSample() {
+ thisSample := memSample{time.Now(), make(map[string]int64)}
+
+ // memory.stat contains "pgmajfault" in cgroups v1 and v2. It
+ // also contains "rss", "swap", and "cache" in cgroups v1.
+ c, err := r.FS.Open(r.statFiles.memoryStat)
if err != nil {
return
}
defer c.Close()
b := bufio.NewScanner(c)
- thisSample := memSample{time.Now(), make(map[string]int64)}
- wantStats := [...]string{"cache", "swap", "pgmajfault", "rss"}
for b.Scan() {
var stat string
var val int64
@@ -244,27 +497,163 @@ func (r *Reporter) doMemoryStats() {
}
thisSample.memStat[stat] = val
}
+
+ // In cgroups v2, we need to read "memory.current" and
+ // "memory.swap.current" as well.
+ for stat, fnm := range map[string]string{
+ // memory.current includes cache. We don't get
+ // separate rss/cache values, so we call
+ // memory usage "rss" for compatibility, and
+ // omit "cache".
+ "rss": r.statFiles.memoryCurrent,
+ "swap": r.statFiles.memorySwapCurrent,
+ } {
+ if fnm == "" {
+ continue
+ }
+ buf, err := fs.ReadFile(r.FS, fnm)
+ if err != nil {
+ continue
+ }
+ var val int64
+ _, err = fmt.Sscanf(string(buf), "%d", &val)
+ if err != nil {
+ continue
+ }
+ thisSample.memStat[stat] = val
+ }
+ for stat, val := range thisSample.memStat {
+ maxKey := memoryKey{statName: stat}
+ if val > r.maxMemSample[maxKey] {
+ r.maxMemSample[maxKey] = val
+ }
+ }
+ r.lastMemSample = thisSample
+
+ if r.ThresholdLogger != nil {
+ for statName, thresholds := range r.MemThresholds {
+ statValue, ok := thisSample.memStat["total_"+statName]
+ if !ok {
+ statValue, ok = thisSample.memStat[statName]
+ if !ok {
+ continue
+ }
+ }
+ var index int
+ var statThreshold Threshold
+ for index, statThreshold = range thresholds {
+ if statValue < statThreshold.threshold {
+ break
+ } else if statThreshold.percentage > 0 {
+ r.ThresholdLogger.Printf("Container using over %d%% of memory (%s %d/%d bytes)",
+ statThreshold.percentage, statName, statValue, statThreshold.total)
+ } else {
+				r.ThresholdLogger.Printf("Container using over %d of memory (%s %d bytes)",
+ statThreshold.threshold, statName, statValue)
+ }
+ }
+ r.MemThresholds[statName] = thresholds[index:]
+ }
+ }
+}
+
+func (r *Reporter) reportMemSample() {
var outstat bytes.Buffer
- for _, key := range wantStats {
+ for _, key := range memoryStats {
// Use "total_X" stats (entire hierarchy) if enabled,
// otherwise just the single cgroup -- see
// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
- if val, ok := thisSample.memStat["total_"+key]; ok {
+ if val, ok := r.lastMemSample.memStat["total_"+key]; ok {
fmt.Fprintf(&outstat, " %d %s", val, key)
- } else if val, ok := thisSample.memStat[key]; ok {
+ } else if val, ok := r.lastMemSample.memStat[key]; ok {
fmt.Fprintf(&outstat, " %d %s", val, key)
}
}
r.Logger.Printf("mem%s\n", outstat.String())
}
+func (r *Reporter) doProcmemStats() {
+ if r.kernelPageSize == 0 {
+ // assign "don't try again" value in case we give up
+ // and return without assigning the real value
+ r.kernelPageSize = -1
+ buf, err := fs.ReadFile(r.FS, "proc/self/smaps")
+ if err != nil {
+ r.Logger.Printf("error reading /proc/self/smaps: %s", err)
+ return
+ }
+ m := regexp.MustCompile(`\nKernelPageSize:\s*(\d+) kB\n`).FindSubmatch(buf)
+ if len(m) != 2 {
+ r.Logger.Printf("error parsing /proc/self/smaps: KernelPageSize not found")
+ return
+ }
+ size, err := strconv.ParseInt(string(m[1]), 10, 64)
+ if err != nil {
+ r.Logger.Printf("error parsing /proc/self/smaps: KernelPageSize %q: %s", m[1], err)
+ return
+ }
+ r.kernelPageSize = size * 1024
+ } else if r.kernelPageSize < 0 {
+ // already failed to determine page size, don't keep
+ // trying/logging
+ return
+ }
+
+ r.reportPIDsMu.Lock()
+ defer r.reportPIDsMu.Unlock()
+ procnames := make([]string, 0, len(r.reportPIDs))
+ for name := range r.reportPIDs {
+ procnames = append(procnames, name)
+ }
+ sort.Strings(procnames)
+ procmem := ""
+ for _, procname := range procnames {
+ pid := r.reportPIDs[procname]
+ buf, err := fs.ReadFile(r.FS, fmt.Sprintf("proc/%d/stat", pid))
+ if err != nil {
+ continue
+ }
+ // If the executable name contains a ')' char,
+ // /proc/$pid/stat will look like '1234 (exec name)) S
+ // 123 ...' -- the last ')' is the end of the 2nd
+ // field.
+ paren := bytes.LastIndexByte(buf, ')')
+ if paren < 0 {
+ continue
+ }
+ fields := bytes.SplitN(buf[paren:], []byte{' '}, 24)
+ if len(fields) < 24 {
+ continue
+ }
+ // rss is the 24th field in .../stat, and fields[0]
+ // here is the last char ')' of the 2nd field, so
+ // rss is fields[22]
+ rss, err := strconv.ParseInt(string(fields[22]), 10, 64)
+ if err != nil {
+ continue
+ }
+ value := rss * r.kernelPageSize
+ procmem += fmt.Sprintf(" %d %s", value, procname)
+ maxKey := memoryKey{pid, procname, "rss"}
+ if value > r.maxMemSample[maxKey] {
+ r.maxMemSample[maxKey] = value
+ }
+ }
+ if procmem != "" {
+ r.Logger.Printf("procmem%s\n", procmem)
+ }
+}
+
func (r *Reporter) doNetworkStats() {
+ if r.statFiles.netDev == "" {
+ return
+ }
sampleTime := time.Now()
- stats, err := r.getContainerNetStats()
+ stats, err := r.FS.Open(r.statFiles.netDev)
if err != nil {
return
}
-
+ defer stats.Close()
scanner := bufio.NewScanner(stats)
for scanner.Scan() {
var ifName string
@@ -324,6 +713,9 @@ func (r *Reporter) doDiskSpaceStats() {
used: (s.Blocks - s.Bfree) * bs,
available: s.Bavail * bs,
}
+ if nextSample.used > r.maxDiskSpaceSample.used {
+ r.maxDiskSpaceSample = nextSample
+ }
var delta string
if r.lastDiskSpaceSample.hasData {
@@ -343,55 +735,100 @@ type cpuSample struct {
sampleTime time.Time
user float64
sys float64
- cpus int64
+ cpus float64
}
-// Return the number of CPUs available in the container. Return 0 if
-// we can't figure out the real number of CPUs.
-func (r *Reporter) getCPUCount() int64 {
- cpusetFile, err := r.openStatFile("cpuset", "cpuset.cpus", true)
- if err != nil {
- return 0
+// Return the number of virtual CPUs available in the container. This
+// can be based on a scheduling ratio (which is not necessarily a
+// whole number) or a restricted set of accessible CPUs.
+//
+// Return the number of host processors based on /proc/cpuinfo if
+// cgroupfs doesn't reveal anything.
+//
+// Return 0 if even that doesn't work.
+func (r *Reporter) getCPUCount() float64 {
+ if buf, err := fs.ReadFile(r.FS, r.statFiles.cpuMax); err == nil {
+ // cpu.max looks like "150000 100000" if CPU usage is
+ // restricted to 150% (docker run --cpus=1.5), or "max
+ // 100000\n" if not.
+ var max, period int64
+ if _, err := fmt.Sscanf(string(buf), "%d %d", &max, &period); err == nil {
+ return float64(max) / float64(period)
+ }
}
- defer cpusetFile.Close()
- b, err := r.readAllOrWarn(cpusetFile)
- if err != nil {
- return 0
+ if buf, err := fs.ReadFile(r.FS, r.statFiles.cpusetCpus); err == nil {
+ // cpuset.cpus looks like "0,4-7\n" if only CPUs
+ // 0,4,5,6,7 are available.
+ cpus := 0
+ for _, v := range bytes.Split(buf, []byte{','}) {
+ var min, max int
+ n, _ := fmt.Sscanf(string(v), "%d-%d", &min, &max)
+ if n == 2 {
+ cpus += (max - min) + 1
+ } else {
+ cpus++
+ }
+ }
+ return float64(cpus)
}
- sp := strings.Split(string(b), ",")
- cpus := int64(0)
- for _, v := range sp {
- var min, max int64
- n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
- if n == 2 {
- cpus += (max - min) + 1
- } else {
- cpus++
+ if buf, err := fs.ReadFile(r.FS, "proc/cpuinfo"); err == nil {
+ // cpuinfo has a line like "processor\t: 0\n" for each
+ // CPU.
+ cpus := 0
+ for _, line := range bytes.Split(buf, []byte{'\n'}) {
+ if bytes.HasPrefix(line, []byte("processor\t:")) {
+ cpus++
+ }
}
+ return float64(cpus)
}
- return cpus
+ return 0
}
func (r *Reporter) doCPUStats() {
- statFile, err := r.openStatFile("cpuacct", "cpuacct.stat", true)
- if err != nil {
- return
- }
- defer statFile.Close()
- b, err := r.readAllOrWarn(statFile)
- if err != nil {
- return
- }
+ var nextSample cpuSample
+ if r.statFiles.cpuStat != "" {
+ // v2
+ f, err := r.FS.Open(r.statFiles.cpuStat)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+ nextSample = cpuSample{
+ hasData: true,
+ sampleTime: time.Now(),
+ cpus: r.getCPUCount(),
+ }
+ for {
+ var stat string
+ var val int64
+ n, err := fmt.Fscanf(f, "%s %d\n", &stat, &val)
+ if err != nil || n != 2 {
+ break
+ }
+ if stat == "user_usec" {
+ nextSample.user = float64(val) / 1000000
+ } else if stat == "system_usec" {
+ nextSample.sys = float64(val) / 1000000
+ }
+ }
+ } else if r.statFiles.cpuacctStat != "" {
+ // v1
+ b, err := fs.ReadFile(r.FS, r.statFiles.cpuacctStat)
+ if err != nil {
+ return
+ }
- var userTicks, sysTicks int64
- fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
- userHz := float64(100)
- nextSample := cpuSample{
- hasData: true,
- sampleTime: time.Now(),
- user: float64(userTicks) / userHz,
- sys: float64(sysTicks) / userHz,
- cpus: r.getCPUCount(),
+ var userTicks, sysTicks int64
+ fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
+ userHz := float64(100)
+ nextSample = cpuSample{
+ hasData: true,
+ sampleTime: time.Now(),
+ user: float64(userTicks) / userHz,
+ sys: float64(sysTicks) / userHz,
+ cpus: r.getCPUCount(),
+ }
}
delta := ""
@@ -401,21 +838,32 @@ func (r *Reporter) doCPUStats() {
nextSample.user-r.lastCPUSample.user,
nextSample.sys-r.lastCPUSample.sys)
}
- r.Logger.Printf("cpu %.4f user %.4f sys %d cpus%s\n",
+ r.Logger.Printf("cpu %.4f user %.4f sys %.2f cpus%s\n",
nextSample.user, nextSample.sys, nextSample.cpus, delta)
r.lastCPUSample = nextSample
}
+func (r *Reporter) doAllStats() {
+ r.reportMemSample()
+ r.doProcmemStats()
+ r.doCPUStats()
+ r.doBlkIOStats()
+ r.doNetworkStats()
+ r.doDiskSpaceStats()
+}
+
// Report stats periodically until we learn (via r.done) that someone
// called Stop.
func (r *Reporter) run() {
defer close(r.flushed)
- r.reportedStatFile = make(map[string]string)
+ r.maxMemSample = make(map[memoryKey]int64)
- if !r.waitForCIDFile() || !r.waitForCgroup() {
+ if !r.waitForPid() {
return
}
+ r.findStatFiles()
+ close(r.ready)
r.lastNetSample = make(map[string]ioSample)
r.lastDiskIOSample = make(map[string]ioSample)
@@ -428,66 +876,111 @@ func (r *Reporter) run() {
r.Logger.Printf("notice: monitoring temp dir %s\n", r.TempDir)
}
- ticker := time.NewTicker(r.PollPeriod)
+ r.getMemSample()
+ r.doAllStats()
+
+ if r.PollPeriod < 1 {
+ r.PollPeriod = time.Second * 10
+ }
+
+ memTicker := time.NewTicker(time.Second)
+ mainTicker := time.NewTicker(r.PollPeriod)
for {
- r.doMemoryStats()
- r.doCPUStats()
- r.doBlkIOStats()
- r.doNetworkStats()
- r.doDiskSpaceStats()
select {
case <-r.done:
return
- case <-ticker.C:
+ case <-memTicker.C:
+ r.getMemSample()
+ case <-mainTicker.C:
+ r.doAllStats()
}
}
}
-// If CID is empty, wait for it to appear in CIDFile. Return true if
-// we get it before we learn (via r.done) that someone called Stop.
-func (r *Reporter) waitForCIDFile() bool {
- if r.CID != "" || r.CIDFile == "" {
- return true
- }
-
+// Wait for Pid() to return a real pid. Return true if this succeeds
+// before Stop is called.
+func (r *Reporter) waitForPid() bool {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
+ warningTimer := time.After(r.PollPeriod)
for {
- cid, err := ioutil.ReadFile(r.CIDFile)
- if err == nil && len(cid) > 0 {
- r.CID = string(cid)
- return true
+ r.pid = r.Pid()
+ if r.pid > 0 {
+ break
}
select {
case <-ticker.C:
+ case <-warningTimer:
+ r.Logger.Printf("warning: Pid() did not return a process ID after %v (config error?) -- still waiting...", r.PollPeriod)
case <-r.done:
- r.Logger.Printf("warning: CID never appeared in %+q: %v", r.CIDFile, err)
+ r.Logger.Printf("warning: Pid() never returned a process ID")
return false
}
}
+ return true
}
-// Wait for the cgroup stats files to appear in cgroup_root. Return
-// true if they appear before r.done indicates someone called Stop. If
-// they don't appear within one poll interval, log a warning and keep
-// waiting.
-func (r *Reporter) waitForCgroup() bool {
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
- warningTimer := time.After(r.PollPeriod)
- for {
- c, err := r.openStatFile("cpuacct", "cgroup.procs", false)
- if err == nil {
- c.Close()
- return true
+func (r *Reporter) dumpSourceFiles(destdir string) error {
+ select {
+ case <-r.done:
+ return errors.New("reporter was never ready")
+ case <-r.ready:
+ }
+ todo := []string{
+ fmt.Sprintf("proc/%d/cgroup", r.pid),
+ fmt.Sprintf("proc/%d/cpuset", r.pid),
+ "proc/cpuinfo",
+ "proc/mounts",
+ "proc/self/smaps",
+ r.statFiles.cpuMax,
+ r.statFiles.cpusetCpus,
+ r.statFiles.cpuacctStat,
+ r.statFiles.cpuStat,
+ r.statFiles.ioServiceBytes,
+ r.statFiles.ioStat,
+ r.statFiles.memoryStat,
+ r.statFiles.memoryCurrent,
+ r.statFiles.memorySwapCurrent,
+ r.statFiles.netDev,
+ }
+ for _, path := range todo {
+ if path == "" {
+ continue
}
- select {
- case <-ticker.C:
- case <-warningTimer:
- r.Logger.Printf("warning: cgroup stats files have not appeared after %v (config error?) -- still waiting...", r.PollPeriod)
- case <-r.done:
- r.Logger.Printf("warning: cgroup stats files never appeared for %v", r.CID)
- return false
+ err := r.createParentsAndCopyFile(destdir, path)
+ if err != nil {
+ return err
+ }
+ }
+	r.reportPIDsMu.Lock()
+	defer r.reportPIDsMu.Unlock()
+ for _, pid := range r.reportPIDs {
+ path := fmt.Sprintf("proc/%d/stat", pid)
+ err := r.createParentsAndCopyFile(destdir, path)
+ if err != nil {
+ return err
+ }
+ }
+ if proc, err := os.FindProcess(r.pid); err != nil || proc.Signal(syscall.Signal(0)) != nil {
+ return fmt.Errorf("process %d no longer exists, snapshot is probably broken", r.pid)
+ }
+ return nil
+}
+
+func (r *Reporter) createParentsAndCopyFile(destdir, path string) error {
+ buf, err := fs.ReadFile(r.FS, path)
+ if os.IsNotExist(err) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ if parent, _ := filepath.Split(path); parent != "" {
+ err = os.MkdirAll(destdir+"/"+parent, 0777)
+ if err != nil {
+ return fmt.Errorf("mkdir %s: %s", destdir+"/"+parent, err)
}
}
+ destfile := destdir + "/" + path
+ r.Logger.Printf("copy %s to %s -- size %d", path, destfile, len(buf))
+ return os.WriteFile(destfile, buf, 0777)
}
diff --git a/lib/crunchstat/crunchstat_test.go b/lib/crunchstat/crunchstat_test.go
index c27e39241d..415c58a533 100644
--- a/lib/crunchstat/crunchstat_test.go
+++ b/lib/crunchstat/crunchstat_test.go
@@ -5,62 +5,254 @@
package crunchstat
import (
- "bufio"
- "io"
- "log"
+ "bytes"
+ "fmt"
+ "io/fs"
"os"
"regexp"
+ "runtime"
+ "strconv"
"testing"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ . "gopkg.in/check.v1"
)
-func bufLogger() (*log.Logger, *bufio.Reader) {
- r, w := io.Pipe()
- logger := log.New(w, "", 0)
- return logger, bufio.NewReader(r)
-}
-
-func TestReadAllOrWarnFail(t *testing.T) {
- logger, rcv := bufLogger()
- rep := Reporter{Logger: logger}
-
- done := make(chan bool)
- var msg []byte
- var err error
- go func() {
- msg, err = rcv.ReadBytes('\n')
- close(done)
- }()
- {
- // The special file /proc/self/mem can be opened for
- // reading, but reading from byte 0 returns an error.
- f, err := os.Open("/proc/self/mem")
- if err != nil {
- t.Fatalf("Opening /proc/self/mem: %s", err)
+const logMsgPrefix = `(?m)(.*\n)*.* msg="`
+
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+var _ = Suite(&suite{})
+
+type testdatasource struct {
+ fspath string
+ pid int
+}
+
+func (s testdatasource) Pid() int {
+ return s.pid
+}
+func (s testdatasource) FS() fs.FS {
+ return os.DirFS(s.fspath)
+}
+
+// To generate a test case for a new OS target, build
+// cmd/arvados-server and run
+//
+// arvados-server crunchstat -dump ./testdata/example1234 sleep 2
+var testdata = map[string]testdatasource{
+ "debian10": {fspath: "testdata/debian10", pid: 3288},
+ "debian11": {fspath: "testdata/debian11", pid: 4153022},
+ "debian12": {fspath: "testdata/debian12", pid: 1115883},
+ "ubuntu1804": {fspath: "testdata/ubuntu1804", pid: 2523},
+ "ubuntu2004": {fspath: "testdata/ubuntu2004", pid: 1360},
+ "ubuntu2204": {fspath: "testdata/ubuntu2204", pid: 1967},
+}
+
+type suite struct {
+ logbuf bytes.Buffer
+ logger *logrus.Logger
+ debian12MemoryCurrent int64
+}
+
+func (s *suite) SetUpSuite(c *C) {
+ s.logger = logrus.New()
+ s.logger.Out = &s.logbuf
+
+ buf, err := os.ReadFile("testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current")
+ c.Assert(err, IsNil)
+ _, err = fmt.Sscanf(string(buf), "%d", &s.debian12MemoryCurrent)
+ c.Assert(err, IsNil)
+}
+
+func (s *suite) SetUpTest(c *C) {
+ s.logbuf.Reset()
+}
+
+// Report stats for the current (go test) process's cgroup, using the
+// test host's real procfs/sysfs.
+func (s *suite) TestReportCurrent(c *C) {
+ r := Reporter{
+ Pid: os.Getpid,
+ Logger: s.logger,
+ PollPeriod: time.Second,
+ }
+ r.Start()
+ defer r.Stop()
+ checkPatterns := []string{
+ `(?ms).*rss.*`,
+ `(?ms).*net:.*`,
+ `(?ms).*blkio:.*`,
+ `(?ms).* [\d.]+ user [\d.]+ sys ` + fmt.Sprintf("%.2f", float64(runtime.NumCPU())) + ` cpus -- .*`,
+ }
+ for deadline := time.Now().Add(4 * time.Second); !c.Failed(); time.Sleep(time.Millisecond) {
+ done := true
+ for _, pattern := range checkPatterns {
+ if m := regexp.MustCompile(pattern).FindSubmatch(s.logbuf.Bytes()); len(m) == 0 {
+ done = false
+ if time.Now().After(deadline) {
+ c.Errorf("timed out waiting for %s", pattern)
+ }
+ }
+ }
+ if done {
+ break
+ }
+ }
+ c.Logf("%s", s.logbuf.String())
+}
+
+// Report stats for the current (go test) process.
+func (s *suite) TestReportPIDs(c *C) {
+ r := Reporter{
+ Pid: func() int { return 1 },
+ Logger: s.logger,
+ PollPeriod: time.Second,
+ }
+ r.Start()
+ defer r.Stop()
+ r.ReportPID("init", 1)
+ r.ReportPID("test_process", os.Getpid())
+ r.ReportPID("nonexistent", 12345) // should be silently ignored/omitted
+ for deadline := time.Now().Add(10 * time.Second); ; time.Sleep(time.Millisecond) {
+ if time.Now().After(deadline) {
+ c.Error("timed out")
+ break
}
- if x, err := rep.readAllOrWarn(f); err == nil {
- t.Fatalf("Expected error, got %v", x)
+ if m := regexp.MustCompile(`(?ms).*procmem \d+ init (\d+) test_process.*`).FindSubmatch(s.logbuf.Bytes()); len(m) > 0 {
+ size, err := strconv.ParseInt(string(m[1]), 10, 64)
+ c.Check(err, IsNil)
+ // Expect >1 MiB and <100 MiB -- otherwise we
+ // are probably misinterpreting /proc/N/stat
+ // or multiplying by the wrong page size.
+ c.Check(size > 1000000, Equals, true)
+ c.Check(size < 100000000, Equals, true)
+ break
}
}
- <-done
- if err != nil {
- t.Fatal(err)
- } else if matched, err := regexp.MatchString("^warning: read /proc/self/mem: .*", string(msg)); err != nil || !matched {
- t.Fatalf("Expected error message about unreadable file, got \"%s\"", msg)
+ c.Logf("%s", s.logbuf.String())
+}
+
+func (s *suite) TestAllTestdata(c *C) {
+ for platform, datasource := range testdata {
+ s.logbuf.Reset()
+ c.Logf("=== %s", platform)
+ rep := Reporter{
+ Pid: datasource.Pid,
+ FS: datasource.FS(),
+ Logger: s.logger,
+ PollPeriod: time.Second,
+ ThresholdLogger: s.logger,
+ Debug: true,
+ }
+ rep.Start()
+ rep.Stop()
+ logs := s.logbuf.String()
+ c.Logf("%s", logs)
+ c.Check(logs, Matches, `(?ms).* \d\d+ rss\\n.*`)
+ c.Check(logs, Matches, `(?ms).*blkio:\d+:\d+ \d+ write \d+ read\\n.*`)
+ c.Check(logs, Matches, `(?ms).*net:\S+ \d+ tx \d+ rx\\n.*`)
+ c.Check(logs, Matches, `(?ms).* [\d.]+ user [\d.]+ sys [2-9]\d*\.\d\d cpus.*`)
}
}
-func TestReadAllOrWarnSuccess(t *testing.T) {
- rep := Reporter{Logger: log.New(os.Stderr, "", 0)}
+func (s *suite) testRSSThresholds(c *C, rssPercentages []int64, alertCount int) {
+ c.Assert(alertCount <= len(rssPercentages), Equals, true)
+ rep := Reporter{
+ Pid: testdata["debian12"].Pid,
+ FS: testdata["debian12"].FS(),
+ Logger: s.logger,
+ MemThresholds: map[string][]Threshold{
+ "rss": NewThresholdsFromPercentages(s.debian12MemoryCurrent*3/2, rssPercentages),
+ },
+ PollPeriod: time.Second * 10,
+ ThresholdLogger: s.logger,
+ }
+ rep.Start()
+ rep.Stop()
+ logs := s.logbuf.String()
+ c.Logf("%s", logs)
+
+	for index, expectPercentage := range rssPercentages {
+ var logCheck Checker
+ if index < alertCount {
+ logCheck = Matches
+ } else {
+ logCheck = Not(Matches)
+ }
+ pattern := fmt.Sprintf(`%sContainer using over %d%% of memory \(rss %d/%d bytes\)"`,
+ logMsgPrefix, expectPercentage, s.debian12MemoryCurrent, s.debian12MemoryCurrent*3/2)
+ c.Check(logs, logCheck, pattern)
+ }
+}
+
+func (s *suite) TestZeroRSSThresholds(c *C) {
+ s.testRSSThresholds(c, []int64{}, 0)
+}
+
+func (s *suite) TestOneRSSThresholdPassed(c *C) {
+ s.testRSSThresholds(c, []int64{55}, 1)
+}
+
+func (s *suite) TestOneRSSThresholdNotPassed(c *C) {
+ s.testRSSThresholds(c, []int64{85}, 0)
+}
+
+func (s *suite) TestMultipleRSSThresholdsNonePassed(c *C) {
+ s.testRSSThresholds(c, []int64{95, 97, 99}, 0)
+}
+
+func (s *suite) TestMultipleRSSThresholdsSomePassed(c *C) {
+ s.testRSSThresholds(c, []int64{45, 60, 75, 90}, 2)
+}
- f, err := os.Open("./crunchstat_test.go")
- if err != nil {
- t.Fatalf("Opening ./crunchstat_test.go: %s", err)
+func (s *suite) TestMultipleRSSThresholdsAllPassed(c *C) {
+ s.testRSSThresholds(c, []int64{1, 2, 3}, 3)
+}
+
+func (s *suite) TestLogMaxima(c *C) {
+ rep := Reporter{
+ Pid: testdata["debian12"].Pid,
+ FS: testdata["debian12"].FS(),
+ Logger: s.logger,
+ PollPeriod: time.Second * 10,
+ TempDir: "/",
}
- data, err := rep.readAllOrWarn(f)
- if err != nil {
- t.Fatalf("got error %s", err)
+ rep.Start()
+ rep.Stop()
+ rep.LogMaxima(s.logger, map[string]int64{"rss": s.debian12MemoryCurrent * 3 / 2})
+ logs := s.logbuf.String()
+ c.Logf("%s", logs)
+
+ expectRSS := fmt.Sprintf(`Maximum container memory rss usage was %d%%, %d/%d bytes`,
+ 66, s.debian12MemoryCurrent, s.debian12MemoryCurrent*3/2)
+ for _, expected := range []string{
+ `Maximum disk usage was \d+%, \d+/\d+ bytes`,
+ `Maximum container memory swap usage was \d\d+ bytes`,
+ `Maximum container memory pgmajfault usage was \d\d+ faults`,
+ expectRSS,
+ } {
+ pattern := logMsgPrefix + expected + `"`
+ c.Check(logs, Matches, pattern)
}
- if matched, err := regexp.MatchString("\npackage crunchstat\n", string(data)); err != nil || !matched {
- t.Fatalf("data failed regexp: err %v, matched %v", err, matched)
+}
+
+func (s *suite) TestLogProcessMemMax(c *C) {
+ rep := Reporter{
+ Pid: os.Getpid,
+ Logger: s.logger,
+ PollPeriod: time.Second * 10,
}
+ rep.ReportPID("test-run", os.Getpid())
+ rep.Start()
+ rep.Stop()
+ rep.LogProcessMemMax(s.logger)
+ logs := s.logbuf.String()
+ c.Logf("%s", logs)
+
+ pattern := logMsgPrefix + `Maximum test-run memory rss usage was \d+ bytes"`
+ c.Check(logs, Matches, pattern)
}
diff --git a/lib/crunchstat/testdata/debian10/proc/3288/cgroup b/lib/crunchstat/testdata/debian10/proc/3288/cgroup
new file mode 100755
index 0000000000..b51ec39063
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/3288/cgroup
@@ -0,0 +1 @@
+0::/user.slice/user-1000.slice/session-7.scope
diff --git a/lib/crunchstat/testdata/debian10/proc/3288/cpuset b/lib/crunchstat/testdata/debian10/proc/3288/cpuset
new file mode 100755
index 0000000000..b498fd495d
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/3288/cpuset
@@ -0,0 +1 @@
+/
diff --git a/lib/crunchstat/testdata/debian10/proc/3288/net/dev b/lib/crunchstat/testdata/debian10/proc/3288/net/dev
new file mode 100755
index 0000000000..44d19e1d84
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/3288/net/dev
@@ -0,0 +1,5 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ ens5: 168696850 62770 0 0 0 0 0 0 1202238 11890 0 0 0 0 0 0
+ lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+docker0: 0 0 0 0 0 0 0 0 1080 12 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/debian10/proc/cpuinfo b/lib/crunchstat/testdata/debian10/proc/cpuinfo
new file mode 100755
index 0000000000..b57280f47a
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/cpuinfo
@@ -0,0 +1,54 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 85
+model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz
+stepping : 4
+microcode : 0x2007006
+cpu MHz : 2499.998
+cache size : 33792 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit mmio_stale_data retbleed gds
+bogomips : 4999.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 85
+model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz
+stepping : 4
+microcode : 0x2007006
+cpu MHz : 2499.998
+cache size : 33792 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit mmio_stale_data retbleed gds
+bogomips : 4999.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/debian10/proc/mounts b/lib/crunchstat/testdata/debian10/proc/mounts
new file mode 100755
index 0000000000..e74553eaf6
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/mounts
@@ -0,0 +1,19 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,nosuid,relatime,size=992288k,nr_inodes=248072,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=200676k,mode=755 0 0
+/dev/nvme0n1p1 / ext4 rw,relatime,discard,errors=remount-ro 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
+cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+bpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=9700 0 0
+mqueue /dev/mqueue mqueue rw,relatime 0 0
+debugfs /sys/kernel/debug debugfs rw,relatime 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+/dev/nvme0n1p15 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=200676k,mode=700,uid=1000,gid=1000 0 0
+/dev/mapper/autoscale_vg-autoscale_lv /tmp ext4 rw,relatime 0 0
diff --git a/lib/crunchstat/testdata/debian10/proc/self/smaps b/lib/crunchstat/testdata/debian10/proc/self/smaps
new file mode 100755
index 0000000000..e4f80e5c30
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/proc/self/smaps
@@ -0,0 +1,2185 @@
+00400000-00403000 r--p 00000000 103:01 268952 /home/admin/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw sd
+00403000-01779000 r-xp 00003000 103:01 268952 /home/admin/arvados-server
+Size: 19928 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12376 kB
+Pss: 12376 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12376 kB
+Private_Dirty: 0 kB
+Referenced: 12376 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me dw sd
+01779000-02f2d000 r--p 01379000 103:01 268952 /home/admin/arvados-server
+Size: 24272 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 10588 kB
+Pss: 10588 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 9084 kB
+Private_Dirty: 1504 kB
+Referenced: 10588 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw sd
+02f2e000-02f2f000 r--p 02b2d000 103:01 268952 /home/admin/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw ac sd
+02f2f000-02fc6000 rw-p 02b2e000 103:01 268952 /home/admin/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 456 kB
+Pss: 456 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 456 kB
+Referenced: 456 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me dw ac sd
+02fc6000-0300d000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 96 kB
+Pss: 96 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 96 kB
+Referenced: 96 kB
+Anonymous: 96 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+03590000-035b1000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000800000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8192 kB
+Pss: 8192 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8192 kB
+Referenced: 8192 kB
+Anonymous: 8192 kB
+LazyFree: 0 kB
+AnonHugePages: 8192 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+c000800000-c004000000 ---p 00000000 00:00 0
+Size: 57344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f851ffc0000-7f8520000000 rw-p 00000000 00:00 0
+Size: 256 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 244 kB
+Pss: 244 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 244 kB
+Referenced: 244 kB
+Anonymous: 244 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f8520000000-7f8520021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me nr sd
+7f8520021000-7f8524000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me nr sd
+7f8524000000-7f8524021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me nr sd
+7f8524021000-7f8528000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me nr sd
+7f8528000000-7f8528021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me nr sd
+7f8528021000-7f852c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me nr sd
+7f852c000000-7f852c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me nr sd
+7f852c021000-7f8530000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me nr sd
+7f8530000000-7f8530021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me nr sd
+7f8530021000-7f8534000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me nr sd
+7f853401c000-7f853429c000 rw-p 00000000 00:00 0
+Size: 2560 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 1028 kB
+Pss: 1028 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 1028 kB
+Referenced: 1028 kB
+Anonymous: 1028 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f853429c000-7f853429d000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f853429d000-7f8534a9d000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f8534a9d000-7f8534a9e000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f8534a9e000-7f85352de000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 252 kB
+Pss: 252 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 252 kB
+Referenced: 252 kB
+Anonymous: 252 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f85352de000-7f85352df000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f85352df000-7f8535adf000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f8535adf000-7f8535ae0000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f8535ae0000-7f85362e0000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f85362e0000-7f85362e1000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f85362e1000-7f8538e00000 rw-p 00000000 00:00 0
+Size: 44156 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 2200 kB
+Pss: 2200 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 2200 kB
+Referenced: 2200 kB
+Anonymous: 2200 kB
+LazyFree: 0 kB
+AnonHugePages: 2048 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f8538e00000-7f8539000000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd hg
+7f8539000000-7f853911d000 rw-p 00000000 00:00 0
+Size: 1140 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f853911d000-7f8549696000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f8549696000-7f8549697000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f8549697000-7f855b546000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855b546000-7f855b547000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855b547000-7f855d91c000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855d91c000-7f855d91d000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855d91d000-7f855dd96000 ---p 00000000 00:00 0
+Size: 4580 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855dd96000-7f855dd97000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855dd97000-7f855de16000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855de16000-7f855de79000 rw-p 00000000 00:00 0
+Size: 396 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 56 kB
+Pss: 56 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 56 kB
+Referenced: 56 kB
+Anonymous: 56 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855de79000-7f855de7b000 r--p 00000000 103:01 2008 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855de7b000-7f855de7e000 r-xp 00002000 103:01 2008 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855de7e000-7f855de7f000 r--p 00005000 103:01 2008 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855de7f000-7f855de80000 r--p 00005000 103:01 2008 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855de80000-7f855de81000 rw-p 00006000 103:01 2008 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855de81000-7f855de83000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855de83000-7f855de84000 r--p 00000000 103:01 2214 /usr/lib/x86_64-linux-gnu/libdl-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855de84000-7f855de85000 r-xp 00001000 103:01 2214 /usr/lib/x86_64-linux-gnu/libdl-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855de85000-7f855de86000 r--p 00002000 103:01 2214 /usr/lib/x86_64-linux-gnu/libdl-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855de86000-7f855de87000 r--p 00002000 103:01 2214 /usr/lib/x86_64-linux-gnu/libdl-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855de87000-7f855de88000 rw-p 00003000 103:01 2214 /usr/lib/x86_64-linux-gnu/libdl-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855de88000-7f855de8b000 r--p 00000000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855de8b000-7f855de92000 r-xp 00003000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 28 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 24 kB
+Pss: 1 kB
+Shared_Clean: 24 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 24 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855de92000-7f855dea6000 r--p 0000a000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 80 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855dea6000-7f855dea7000 ---p 0001e000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855dea7000-7f855dea8000 r--p 0001e000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855dea8000-7f855dea9000 rw-p 0001f000 103:01 324 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855dea9000-7f855deb3000 rw-p 00000000 00:00 0
+Size: 40 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855deb3000-7f855ded5000 r--p 00000000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 136 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 136 kB
+Pss: 4 kB
+Shared_Clean: 136 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 136 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855ded5000-7f855e01c000 r-xp 00022000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 1308 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 612 kB
+Pss: 20 kB
+Shared_Clean: 612 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 612 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855e01c000-7f855e068000 r--p 00169000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 304 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 4 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e068000-7f855e069000 ---p 001b5000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855e069000-7f855e06d000 r--p 001b5000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855e06d000-7f855e06f000 rw-p 001b9000 103:01 2212 /usr/lib/x86_64-linux-gnu/libc-2.28.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e06f000-7f855e073000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 12 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e073000-7f855e076000 r--p 00000000 103:01 2514 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e076000-7f855e07e000 r-xp 00003000 103:01 2514 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 2 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855e07e000-7f855e082000 r--p 0000b000 103:01 2514 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e082000-7f855e083000 r--p 0000e000 103:01 2514 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855e083000-7f855e084000 rw-p 0000f000 103:01 2514 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e084000-7f855e08a000 r--p 00000000 103:01 2228 /usr/lib/x86_64-linux-gnu/libpthread-2.28.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 24 kB
+Pss: 1 kB
+Shared_Clean: 24 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 24 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e08a000-7f855e099000 r-xp 00006000 103:01 2228 /usr/lib/x86_64-linux-gnu/libpthread-2.28.so
+Size: 60 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 60 kB
+Pss: 2 kB
+Shared_Clean: 60 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 60 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855e099000-7f855e09f000 r--p 00015000 103:01 2228 /usr/lib/x86_64-linux-gnu/libpthread-2.28.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e09f000-7f855e0a0000 r--p 0001a000 103:01 2228 /usr/lib/x86_64-linux-gnu/libpthread-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855e0a0000-7f855e0a1000 rw-p 0001b000 103:01 2228 /usr/lib/x86_64-linux-gnu/libpthread-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e0a1000-7f855e0a5000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e0a5000-7f855e0a9000 r--p 00000000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 2 kB
+Shared_Clean: 16 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 16 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e0a9000-7f855e0b6000 r-xp 00004000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 52 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 52 kB
+Pss: 7 kB
+Shared_Clean: 52 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 52 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me sd
+7f855e0b6000-7f855e0ba000 r--p 00011000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me sd
+7f855e0ba000-7f855e0bb000 ---p 00015000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: mr mw me sd
+7f855e0bb000-7f855e0bc000 r--p 00015000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me ac sd
+7f855e0bc000-7f855e0bd000 rw-p 00016000 103:01 2229 /usr/lib/x86_64-linux-gnu/libresolv-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e0bd000-7f855e0c1000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7f855e0cd000-7f855e0ce000 r--p 00000000 103:01 2204 /usr/lib/x86_64-linux-gnu/ld-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw sd
+7f855e0ce000-7f855e0ec000 r-xp 00001000 103:01 2204 /usr/lib/x86_64-linux-gnu/ld-2.28.so
+Size: 120 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 120 kB
+Pss: 3 kB
+Shared_Clean: 120 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 120 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me dw sd
+7f855e0ec000-7f855e0f4000 r--p 0001f000 103:01 2204 /usr/lib/x86_64-linux-gnu/ld-2.28.so
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 1 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw sd
+7f855e0f4000-7f855e0f5000 r--p 00026000 103:01 2204 /usr/lib/x86_64-linux-gnu/ld-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr mw me dw ac sd
+7f855e0f5000-7f855e0f6000 rw-p 00027000 103:01 2204 /usr/lib/x86_64-linux-gnu/ld-2.28.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd wr mr mw me dw ac sd
+7f855e0f6000-7f855e0f7000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me ac sd
+7fffd54dc000-7fffd54fd000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+ProtectionKey: 0
+VmFlags: rd wr mr mw me gd ac
+7fffd556f000-7fffd5572000 r--p 00000000 00:00 0 [vvar]
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd mr pf io de dd sd
+7fffd5572000-7fffd5574000 r-xp 00000000 00:00 0 [vdso]
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+ProtectionKey: 0
+VmFlags: rd ex mr mw me de sd
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/cpu.max b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/cpu.max
new file mode 100755
index 0000000000..1c1d3e7c30
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/cpu.max
@@ -0,0 +1 @@
+max 100000
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/io.stat b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/io.stat
new file mode 100755
index 0000000000..14a8cfc4dd
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/io.stat
@@ -0,0 +1,3 @@
+259:4 rbytes=12288 wbytes=123613184 rios=3 wios=482 dbytes=0 dios=0
+254:0 rbytes=12288 wbytes=123613184 rios=3 wios=482 dbytes=0 dios=0
+259:0 rbytes=4071424 wbytes=38789120 rios=248 wios=157 dbytes=0 dios=0
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/cpu.stat b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/cpu.stat
new file mode 100755
index 0000000000..c71942778a
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/cpu.stat
@@ -0,0 +1,3 @@
+usage_usec 2670017
+user_usec 1381923
+system_usec 1288094
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.current b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.current
new file mode 100755
index 0000000000..438275a946
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.current
@@ -0,0 +1 @@
+133386240
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.stat b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.stat
new file mode 100755
index 0000000000..533635bd15
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.stat
@@ -0,0 +1,28 @@
+anon 16777216
+file 109891584
+kernel_stack 98304
+slab 5595136
+sock 0
+shmem 0
+file_mapped 23924736
+file_dirty 7163904
+file_writeback 135168
+inactive_anon 0
+active_anon 16818176
+inactive_file 108355584
+active_file 1560576
+unevictable 0
+slab_reclaimable 4489216
+slab_unreclaimable 1105920
+pgfault 67947
+pgmajfault 0
+pgrefill 0
+pgscan 0
+pgsteal 0
+pgactivate 0
+pgdeactivate 0
+pglazyfree 0
+pglazyfreed 0
+workingset_refault 0
+workingset_activate 0
+workingset_nodereclaim 0
diff --git a/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.swap.current b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.swap.current
new file mode 100755
index 0000000000..573541ac97
--- /dev/null
+++ b/lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.swap.current
@@ -0,0 +1 @@
+0
diff --git a/lib/crunchstat/testdata/debian11/proc/4153022/cgroup b/lib/crunchstat/testdata/debian11/proc/4153022/cgroup
new file mode 100755
index 0000000000..3db44ec6b0
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/4153022/cgroup
@@ -0,0 +1 @@
+0::/user.slice/user-1000.slice/session-5424.scope
diff --git a/lib/crunchstat/testdata/debian11/proc/4153022/cpuset b/lib/crunchstat/testdata/debian11/proc/4153022/cpuset
new file mode 100755
index 0000000000..fb6c61a862
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/4153022/cpuset
@@ -0,0 +1 @@
+/user.slice
diff --git a/lib/crunchstat/testdata/debian11/proc/4153022/net/dev b/lib/crunchstat/testdata/debian11/proc/4153022/net/dev
new file mode 100755
index 0000000000..abd7cef726
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/4153022/net/dev
@@ -0,0 +1,7 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 161155690314 90375905 0 0 0 0 0 0 161155690314 90375905 0 0 0 0 0 0
+ ens3: 163923112 1884265 0 0 0 0 0 0 43218121 239766 0 0 0 0 0 0
+ ens9: 24574250159 83081845 0 0 0 0 0 0 49312502353 91591944 0 0 0 0 0 0
+docker0: 6958795 109630 0 0 0 0 0 0 671569248 187319 0 0 0 0 0 0
+tailscale0: 82192857 118550 0 0 0 0 0 0 6898232 100243 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/debian11/proc/cpuinfo b/lib/crunchstat/testdata/debian11/proc/cpuinfo
new file mode 100644
index 0000000000..6df8854afc
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/cpuinfo
@@ -0,0 +1,224 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 0
+cpu cores : 8
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 1
+cpu cores : 8
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 2
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 2
+cpu cores : 8
+apicid : 2
+initial apicid : 2
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 3
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 3
+cpu cores : 8
+apicid : 3
+initial apicid : 3
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 4
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 4
+cpu cores : 8
+apicid : 4
+initial apicid : 4
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 5
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 5
+cpu cores : 8
+apicid : 5
+initial apicid : 5
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 6
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 6
+cpu cores : 8
+apicid : 6
+initial apicid : 6
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
+processor : 7
+vendor_id : GenuineIntel
+cpu family : 6
+model : 61
+model name : Intel Core Processor (Broadwell)
+stepping : 2
+microcode : 0x1
+cpu MHz : 3292.366
+cache size : 4096 KB
+physical id : 0
+siblings : 8
+core id : 7
+cpu cores : 8
+apicid : 7
+initial apicid : 7
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.73
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/debian11/proc/mounts b/lib/crunchstat/testdata/debian11/proc/mounts
new file mode 100755
index 0000000000..715844c808
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/mounts
@@ -0,0 +1,23 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,nosuid,relatime,size=4055540k,nr_inodes=1013885,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=814692k,mode=755 0 0
+/dev/vdb1 / ext4 rw,relatime,errors=remount-ro 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
+cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+none /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=30,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=9589 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
+debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
+tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
+configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
+none /tmp tmpfs rw,relatime 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=814688k,nr_inodes=203672,mode=700,uid=1000,gid=1000 0 0
+binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
+arvados-client /home/tom/keep fuse.arvados-client rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
diff --git a/lib/crunchstat/testdata/debian11/proc/self/smaps b/lib/crunchstat/testdata/debian11/proc/self/smaps
new file mode 100755
index 0000000000..f82b32b4cc
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/proc/self/smaps
@@ -0,0 +1,2461 @@
+00400000-00403000 r--p 00000000 fe:11 1200832 /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+00403000-01776000 r-xp 00003000 fe:11 1200832 /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server
+Size: 19916 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12492 kB
+Pss: 12492 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12492 kB
+Private_Dirty: 0 kB
+Referenced: 12492 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me dw sd
+01776000-02f28000 r--p 01376000 fe:11 1200832 /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server
+Size: 24264 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 10984 kB
+Pss: 10984 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 10984 kB
+Private_Dirty: 0 kB
+Referenced: 10984 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+02f28000-02f29000 r--p 02b27000 fe:11 1200832 /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw ac sd
+02f29000-02fc0000 rw-p 02b28000 fe:11 1200832 /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 480 kB
+Pss: 480 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 304 kB
+Private_Dirty: 176 kB
+Referenced: 480 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me dw ac sd
+02fc0000-03007000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 100 kB
+Pss: 100 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 100 kB
+Referenced: 100 kB
+Anonymous: 100 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+03a45000-03a66000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000800000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 5684 kB
+Pss: 5684 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 5684 kB
+Referenced: 5684 kB
+Anonymous: 5684 kB
+LazyFree: 0 kB
+AnonHugePages: 2048 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+c000800000-c004000000 ---p 00000000 00:00 0
+Size: 57344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f3efc000000-7f3efc021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3efc021000-7f3f00000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f00000000-7f3f00021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f00021000-7f3f04000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f077ff000-7f3f07800000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f07800000-7f3f08000000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 12 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f08000000-7f3f08021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f08021000-7f3f0c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f0c000000-7f3f0c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f0c021000-7f3f10000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f10000000-7f3f10021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f10021000-7f3f14000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f14000000-7f3f14021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f14021000-7f3f18000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f18000000-7f3f18021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f18021000-7f3f1c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f1c000000-7f3f1c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f1c021000-7f3f20000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f20000000-7f3f20021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f3f20021000-7f3f24000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f3f24361000-7f3f24421000 rw-p 00000000 00:00 0
+Size: 768 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 276 kB
+Pss: 276 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 276 kB
+Referenced: 276 kB
+Anonymous: 276 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f24421000-7f3f24422000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f24422000-7f3f24c62000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f24c62000-7f3f24c63000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f24c63000-7f3f25543000 rw-p 00000000 00:00 0
+Size: 9088 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 720 kB
+Pss: 720 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 720 kB
+Referenced: 720 kB
+Anonymous: 720 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f25543000-7f3f25544000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f25544000-7f3f25ee4000 rw-p 00000000 00:00 0
+Size: 9856 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 288 kB
+Pss: 288 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 288 kB
+Referenced: 288 kB
+Anonymous: 288 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f25ee4000-7f3f25ee5000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f25ee5000-7f3f266e5000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f266e5000-7f3f266e6000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f266e6000-7f3f26f26000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 264 kB
+Pss: 264 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 264 kB
+Referenced: 264 kB
+Anonymous: 264 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f26f26000-7f3f26f27000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f26f27000-7f3f27727000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f27727000-7f3f27728000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f27728000-7f3f27f28000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f27f28000-7f3f27f29000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f27f29000-7f3f2aa00000 rw-p 00000000 00:00 0
+Size: 43868 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 164 kB
+Pss: 164 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 164 kB
+Referenced: 164 kB
+Anonymous: 164 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f3f2aa00000-7f3f2ac00000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd hg
+7f3f2ac00000-7f3f2ad65000 rw-p 00000000 00:00 0
+Size: 1428 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f2ad65000-7f3f3b2de000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f3f3b2de000-7f3f3b2df000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f3b2df000-7f3f4d18e000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f3f4d18e000-7f3f4d18f000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4d18f000-7f3f4f564000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f3f4f564000-7f3f4f565000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4f565000-7f3f4f9de000 ---p 00000000 00:00 0
+Size: 4580 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f3f4f9de000-7f3f4f9df000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4f9df000-7f3f4fa5e000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f3f4fa5e000-7f3f4fac1000 rw-p 00000000 00:00 0
+Size: 396 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 72 kB
+Pss: 72 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 72 kB
+Referenced: 72 kB
+Anonymous: 72 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fac1000-7f3f4fac3000 r--p 00000000 fe:11 131148 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fac3000-7f3f4fac6000 r-xp 00002000 fe:11 131148 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fac6000-7f3f4fac7000 r--p 00005000 fe:11 131148 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fac7000-7f3f4fac8000 r--p 00005000 fe:11 131148 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fac8000-7f3f4fac9000 rw-p 00006000 fe:11 131148 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fac9000-7f3f4facb000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4facb000-7f3f4facc000 r--p 00000000 fe:11 131382 /lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4facc000-7f3f4face000 r-xp 00001000 fe:11 131382 /lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4face000-7f3f4facf000 r--p 00003000 fe:11 131382 /lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4facf000-7f3f4fad0000 r--p 00003000 fe:11 131382 /lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fad0000-7f3f4fad1000 rw-p 00004000 fe:11 131382 /lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fad1000-7f3f4fad4000 r--p 00000000 fe:11 131116 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fad4000-7f3f4fadc000 r-xp 00003000 fe:11 131116 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 1 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fadc000-7f3f4faf0000 r--p 0000b000 fe:11 131116 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 80 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4faf0000-7f3f4faf1000 r--p 0001e000 fe:11 131116 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4faf1000-7f3f4faf2000 rw-p 0001f000 fe:11 131116 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4faf2000-7f3f4fb02000 rw-p 00000000 00:00 0
+Size: 64 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fb02000-7f3f4fb24000 r--p 00000000 fe:11 131364 /lib/x86_64-linux-gnu/libc-2.31.so
+Size: 136 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 136 kB
+Pss: 2 kB
+Shared_Clean: 136 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 136 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fb24000-7f3f4fc7d000 r-xp 00022000 fe:11 131364 /lib/x86_64-linux-gnu/libc-2.31.so
+Size: 1380 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 688 kB
+Pss: 13 kB
+Shared_Clean: 688 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 688 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fc7d000-7f3f4fccc000 r--p 0017b000 fe:11 131364 /lib/x86_64-linux-gnu/libc-2.31.so
+Size: 316 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 2 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fccc000-7f3f4fcd0000 r--p 001c9000 fe:11 131364 /lib/x86_64-linux-gnu/libc-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fcd0000-7f3f4fcd2000 rw-p 001cd000 fe:11 131364 /lib/x86_64-linux-gnu/libc-2.31.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fcd2000-7f3f4fcd6000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fcd6000-7f3f4fcd9000 r--p 00000000 fe:11 131147 /lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fcd9000-7f3f4fce2000 r-xp 00003000 fe:11 131147 /lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 36 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 36 kB
+Pss: 1 kB
+Shared_Clean: 36 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 36 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fce2000-7f3f4fce6000 r--p 0000c000 fe:11 131147 /lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fce6000-7f3f4fce7000 r--p 0000f000 fe:11 131147 /lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fce7000-7f3f4fce8000 rw-p 00010000 fe:11 131147 /lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fce8000-7f3f4fcee000 r--p 00000000 fe:11 131619 /lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 24 kB
+Pss: 0 kB
+Shared_Clean: 24 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 24 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fcee000-7f3f4fcfe000 r-xp 00006000 fe:11 131619 /lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 64 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 64 kB
+Pss: 1 kB
+Shared_Clean: 64 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 64 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fcfe000-7f3f4fd04000 r--p 00016000 fe:11 131619 /lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fd04000-7f3f4fd05000 r--p 0001b000 fe:11 131619 /lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fd05000-7f3f4fd06000 rw-p 0001c000 fe:11 131619 /lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fd06000-7f3f4fd0a000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fd0a000-7f3f4fd0e000 r--p 00000000 fe:11 133617 /lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 1 kB
+Shared_Clean: 16 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 16 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fd0e000-7f3f4fd1c000 r-xp 00004000 fe:11 133617 /lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 56 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 56 kB
+Pss: 4 kB
+Shared_Clean: 56 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 56 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f3f4fd1c000-7f3f4fd20000 r--p 00012000 fe:11 133617 /lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f3f4fd20000-7f3f4fd21000 r--p 00015000 fe:11 133617 /lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f3f4fd21000-7f3f4fd22000 rw-p 00016000 fe:11 133617 /lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fd22000-7f3f4fd26000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f3f4fd34000-7f3f4fd35000 r--p 00000000 fe:11 131157 /lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+7f3f4fd35000-7f3f4fd55000 r-xp 00001000 fe:11 131157 /lib/x86_64-linux-gnu/ld-2.31.so
+Size: 128 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 1 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me dw sd
+7f3f4fd55000-7f3f4fd5d000 r--p 00021000 fe:11 131157 /lib/x86_64-linux-gnu/ld-2.31.so
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 0 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+7f3f4fd5e000-7f3f4fd5f000 r--p 00029000 fe:11 131157 /lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw ac sd
+7f3f4fd5f000-7f3f4fd60000 rw-p 0002a000 fe:11 131157 /lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me dw ac sd
+7f3f4fd60000-7f3f4fd61000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7fff6be6f000-7fff6be90000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 20 kB
+Pss: 20 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 20 kB
+Referenced: 20 kB
+Anonymous: 20 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me gd ac
+7fff6bee1000-7fff6bee5000 r--p 00000000 00:00 0 [vvar]
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr pf io de dd sd
+7fff6bee5000-7fff6bee7000 r-xp 00000000 00:00 0 [vdso]
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me de sd
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpu.max b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpu.max
new file mode 100755
index 0000000000..1c1d3e7c30
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpu.max
@@ -0,0 +1 @@
+max 100000
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpuset.cpus.effective b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpuset.cpus.effective
new file mode 100755
index 0000000000..74fc2fb6b0
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpuset.cpus.effective
@@ -0,0 +1 @@
+0-7
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/io.stat b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/io.stat
new file mode 100755
index 0000000000..34cbdb8634
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/io.stat
@@ -0,0 +1,4 @@
+7:1 rbytes=7218176 wbytes=0 rios=240 wios=0 dbytes=0 dios=0
+7:2 rbytes=2115584 wbytes=0 rios=64 wios=0 dbytes=0 dios=0
+7:0 rbytes=218925056 wbytes=0 rios=7382 wios=0 dbytes=0 dios=0
+254:16 rbytes=268548554752 wbytes=121274503168 rios=32054623 wios=8793862 dbytes=0 dios=0
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/cpu.stat b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/cpu.stat
new file mode 100755
index 0000000000..ffd34458f1
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/cpu.stat
@@ -0,0 +1,3 @@
+usage_usec 935017572836
+user_usec 441034348821
+system_usec 493983224015
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.current b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.current
new file mode 100755
index 0000000000..9e5f0fbc1b
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.current
@@ -0,0 +1 @@
+3662082048
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.stat b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.stat
new file mode 100755
index 0000000000..e72becb2c2
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.stat
@@ -0,0 +1,36 @@
+anon 869666816
+file 2622799872
+kernel_stack 4276224
+percpu 0
+sock 0
+shmem 849936384
+file_mapped 57311232
+file_dirty 270336
+file_writeback 135168
+anon_thp 553648128
+inactive_anon 391749632
+active_anon 1332850688
+inactive_file 243453952
+active_file 1529008128
+unevictable 0
+slab_reclaimable 135355928
+slab_unreclaimable 8377048
+slab 143732976
+workingset_refault_anon 84645
+workingset_refault_file 7429752
+workingset_activate_anon 15444
+workingset_activate_file 4704645
+workingset_restore_anon 1551
+workingset_restore_file 2826087
+workingset_nodereclaim 0
+pgfault 1688981547
+pgmajfault 322476
+pgrefill 24091451
+pgscan 32183888
+pgsteal 18202144
+pgactivate 32572518
+pgdeactivate 13641072
+pglazyfree 1254
+pglazyfreed 0
+thp_fault_alloc 149061
+thp_collapse_alloc 3267
diff --git a/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.swap.current b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.swap.current
new file mode 100755
index 0000000000..cadc7c5cde
--- /dev/null
+++ b/lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.swap.current
@@ -0,0 +1 @@
+2462470144
diff --git a/lib/crunchstat/testdata/debian12/proc/1115883/cgroup b/lib/crunchstat/testdata/debian12/proc/1115883/cgroup
new file mode 100755
index 0000000000..af9540a755
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/1115883/cgroup
@@ -0,0 +1 @@
+0::/user.slice/user-1000.slice/session-4.scope
diff --git a/lib/crunchstat/testdata/debian12/proc/1115883/cpuset b/lib/crunchstat/testdata/debian12/proc/1115883/cpuset
new file mode 100755
index 0000000000..fb6c61a862
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/1115883/cpuset
@@ -0,0 +1 @@
+/user.slice
diff --git a/lib/crunchstat/testdata/debian12/proc/1115883/net/dev b/lib/crunchstat/testdata/debian12/proc/1115883/net/dev
new file mode 100755
index 0000000000..6a28430fa7
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/1115883/net/dev
@@ -0,0 +1,4 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 44467931 32124 0 0 0 0 0 0 44467931 32124 0 0 0 0 0 0
+enp4s0: 76312173774 219652689 0 33 0 0 0 226563 52498381226 153789479 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/debian12/proc/cpuinfo b/lib/crunchstat/testdata/debian12/proc/cpuinfo
new file mode 100644
index 0000000000..0685c5f2b6
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/cpuinfo
@@ -0,0 +1,224 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3591.771
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 0
+cpu cores : 4
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3591.750
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 0
+cpu cores : 4
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 2
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3602.533
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 1
+cpu cores : 4
+apicid : 2
+initial apicid : 2
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 3
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3607.600
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 1
+cpu cores : 4
+apicid : 3
+initial apicid : 3
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 4
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3600.169
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 2
+cpu cores : 4
+apicid : 4
+initial apicid : 4
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 5
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3609.318
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 2
+cpu cores : 4
+apicid : 5
+initial apicid : 5
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 6
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3591.905
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 3
+cpu cores : 4
+apicid : 6
+initial apicid : 6
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 7
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3591.804
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 3
+cpu cores : 4
+apicid : 7
+initial apicid : 7
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown
+bogomips : 6584.91
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/debian12/proc/mounts b/lib/crunchstat/testdata/debian12/proc/mounts
new file mode 100755
index 0000000000..f8850e27df
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/mounts
@@ -0,0 +1,32 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,relatime 0 0
+udev /dev devtmpfs rw,nosuid,relatime,size=16346052k,nr_inodes=4086513,mode=755,inode64 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=3275420k,mode=755,inode64 0 0
+/dev/mapper/slab1-root / ext4 rw,relatime,errors=remount-ro,stripe=8191 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev,inode64 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,inode64 0 0
+cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+bpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=29,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=16801 0 0
+tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
+mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
+debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
+configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
+ramfs /run/credentials/systemd-sysusers.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
+ramfs /run/credentials/systemd-sysctl.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
+ramfs /run/credentials/systemd-tmpfiles-setup-dev.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
+/dev/mapper/slab1-home /home ext4 rw,relatime,errors=remount-ro 0 0
+/dev/md0p1 /boot ext4 rw,relatime,stripe=8191 0 0
+ramfs /run/credentials/systemd-tmpfiles-setup.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
+binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=3275416k,nr_inodes=818854,mode=700,uid=1000,gid=1000,inode64 0 0
+gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+/dev/mapper/sea5a /sea5a ext4 rw,relatime 0 0
+portal /run/user/1000/doc fuse.portal rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+curve:/ /tmp/c fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+tmpfs /run/user/0 tmpfs rw,nosuid,nodev,relatime,size=3275416k,nr_inodes=818854,mode=700,inode64 0 0
diff --git a/lib/crunchstat/testdata/debian12/proc/self/smaps b/lib/crunchstat/testdata/debian12/proc/self/smaps
new file mode 100755
index 0000000000..6152a72445
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/proc/self/smaps
@@ -0,0 +1,2640 @@
+00400000-00403000 r--p 00000000 fd:01 2228820 /tmp/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+00403000-01776000 r-xp 00003000 fd:01 2228820 /tmp/arvados-server
+Size: 19916 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12492 kB
+Pss: 12492 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12492 kB
+Private_Dirty: 0 kB
+Referenced: 12492 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+01776000-02f28000 r--p 01376000 fd:01 2228820 /tmp/arvados-server
+Size: 24264 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 10856 kB
+Pss: 10856 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 10856 kB
+Private_Dirty: 0 kB
+Referenced: 10856 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+02f28000-02f29000 r--p 02b27000 fd:01 2228820 /tmp/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+02f29000-02fc0000 rw-p 02b28000 fd:01 2228820 /tmp/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 480 kB
+Pss: 480 kB
+Pss_Dirty: 176 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 304 kB
+Private_Dirty: 176 kB
+Referenced: 480 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+02fc0000-03007000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 100 kB
+Pss: 100 kB
+Pss_Dirty: 100 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 100 kB
+Referenced: 100 kB
+Anonymous: 100 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+04b9f000-04bc0000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000c00000 rw-p 00000000 00:00 0
+Size: 12288 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 5596 kB
+Pss: 5596 kB
+Pss_Dirty: 5596 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 5596 kB
+Referenced: 5596 kB
+Anonymous: 5596 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+c000c00000-c004000000 ---p 00000000 00:00 0
+Size: 53248 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f716c000000-7f716c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f716c021000-7f7170000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7170000000-7f7170021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7170021000-7f7174000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7174000000-7f7174021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7174021000-7f7178000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f717a7fd000-7f717a7fe000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f717a7fe000-7f717affe000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f717affe000-7f717afff000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f717afff000-7f717b7ff000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f717b7ff000-7f717b800000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f717b800000-7f717c000000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Pss_Dirty: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 12 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f717c000000-7f717c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f717c021000-7f7180000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7180000000-7f7180021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7180021000-7f7184000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7184000000-7f7184021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7184021000-7f7188000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7188000000-7f7188021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7188021000-7f718c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f718c000000-7f718c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f718c021000-7f7190000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7190000000-7f7190021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7190021000-7f7194000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f7194000000-7f7194021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7194021000-7f7198000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f71985d9000-7f7198769000 rw-p 00000000 00:00 0
+Size: 1600 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 968 kB
+Pss: 968 kB
+Pss_Dirty: 968 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 968 kB
+Referenced: 968 kB
+Anonymous: 968 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7198769000-7f719876a000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f719876a000-7f7198faa000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 24 kB
+Pss: 24 kB
+Pss_Dirty: 24 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 24 kB
+Referenced: 24 kB
+Anonymous: 24 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f7198faa000-7f7198fab000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7198fab000-7f71997fb000 rw-p 00000000 00:00 0
+Size: 8512 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 32 kB
+Pss_Dirty: 32 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 32 kB
+Referenced: 32 kB
+Anonymous: 32 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f71997fb000-7f71997fc000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f71997fc000-7f7199ffc000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f7199ffc000-7f7199ffd000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7199ffd000-7f719a7fd000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f719a7fd000-7f719a7fe000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f719a7fe000-7f719affe000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f719affe000-7f719afff000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f719afff000-7f719b7ff000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f719b7ff000-7f719b800000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f719b800000-7f719c000000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f719c000000-7f719c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f719c021000-7f71a0000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me nr sd
+7f71a000b000-7f71a01eb000 rw-p 00000000 00:00 0
+Size: 1920 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 524 kB
+Pss: 524 kB
+Pss_Dirty: 524 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 524 kB
+Referenced: 524 kB
+Anonymous: 524 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71a01eb000-7f71a01ec000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f71a01ec000-7f71a2e00000 rw-p 00000000 00:00 0
+Size: 45136 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 168 kB
+Pss: 168 kB
+Pss_Dirty: 168 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 168 kB
+Referenced: 168 kB
+Anonymous: 168 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd
+7f71a2e00000-7f71a3000000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd hg
+7f71a3000000-7f71a3018000 rw-p 00000000 00:00 0
+Size: 96 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71a3018000-7f71b3591000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f71b3591000-7f71b3592000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71b3592000-7f71c5441000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f71c5441000-7f71c5442000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c5442000-7f71c7817000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f71c7817000-7f71c7818000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7818000-7f71c7c91000 ---p 00000000 00:00 0
+Size: 4580 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: mr mw me sd
+7f71c7c91000-7f71c7c92000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7c92000-7f71c7d11000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f71c7d11000-7f71c7d74000 rw-p 00000000 00:00 0
+Size: 396 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 72 kB
+Pss: 72 kB
+Pss_Dirty: 72 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 72 kB
+Referenced: 72 kB
+Anonymous: 72 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7d74000-7f71c7d76000 r--p 00000000 fd:01 1609774 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 2 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7d76000-7f71c7d79000 r-xp 00002000 fd:01 1609774 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 1 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7d79000-7f71c7d7a000 r--p 00005000 fd:01 1609774 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 1 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7d7a000-7f71c7d7b000 r--p 00006000 fd:01 1609774 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7d7b000-7f71c7d7c000 rw-p 00007000 fd:01 1609774 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7d7c000-7f71c7d7e000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7d7e000-7f71c7d81000 r--p 00000000 fd:01 1609746 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 3 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7d81000-7f71c7d88000 r-xp 00003000 fd:01 1609746 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 28 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 28 kB
+Pss: 1 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 28 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 28 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7d88000-7f71c7d9d000 r--p 0000a000 fd:01 1609746 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 84 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7d9d000-7f71c7d9e000 r--p 0001e000 fd:01 1609746 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7d9e000-7f71c7d9f000 rw-p 0001f000 fd:01 1609746 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7d9f000-7f71c7daf000 rw-p 00000000 00:00 0
+Size: 64 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7daf000-7f71c7dd5000 r--p 00000000 fd:01 1576589 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 152 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 152 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 152 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 152 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7dd5000-7f71c7f2a000 r-xp 00026000 fd:01 1576589 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 1364 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 940 kB
+Pss: 4 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 940 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 940 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7f2a000-7f71c7f7d000 r--p 0017b000 fd:01 1576589 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 332 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7f7d000-7f71c7f81000 r--p 001ce000 fd:01 1576589 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Pss_Dirty: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7f81000-7f71c7f83000 rw-p 001d2000 fd:01 1576589 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7f83000-7f71c7f90000 rw-p 00000000 00:00 0
+Size: 52 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 20 kB
+Pss: 20 kB
+Pss_Dirty: 20 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 20 kB
+Referenced: 20 kB
+Anonymous: 20 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7f90000-7f71c7f93000 r--p 00000000 fd:01 1609792 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 3 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7f93000-7f71c7f9c000 r-xp 00003000 fd:01 1609792 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 36 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 36 kB
+Pss: 11 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 36 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 36 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7f9c000-7f71c7fa0000 r--p 0000c000 fd:01 1609792 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7fa0000-7f71c7fa1000 r--p 0000f000 fd:01 1609792 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7fa1000-7f71c7fa2000 rw-p 00010000 fd:01 1609792 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7fa2000-7f71c7fa3000 r--p 00000000 fd:01 1609844 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7fa3000-7f71c7fa4000 r-xp 00001000 fd:01 1609844 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7fa4000-7f71c7fa5000 r--p 00002000 fd:01 1609844 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7fa5000-7f71c7fa6000 r--p 00002000 fd:01 1609844 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7fa6000-7f71c7fa7000 rw-p 00003000 fd:01 1609844 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7fa7000-7f71c7faa000 r--p 00000000 fd:01 1609840 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7faa000-7f71c7fb2000 r-xp 00003000 fd:01 1609840 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 28 kB
+Pss: 1 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 28 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 28 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7fb2000-7f71c7fb4000 r--p 0000b000 fd:01 1609840 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7fb4000-7f71c7fb5000 r--p 0000d000 fd:01 1609840 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c7fb5000-7f71c7fb6000 rw-p 0000e000 fd:01 1609840 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Pss_Dirty: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7fb6000-7f71c7fb8000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7fbe000-7f71c7fd0000 rw-p 00000000 00:00 0
+Size: 72 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 20 kB
+Pss: 20 kB
+Pss_Dirty: 20 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 20 kB
+Referenced: 20 kB
+Anonymous: 20 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f71c7fd0000-7f71c7fd1000 r--p 00000000 fd:01 1586742 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c7fd1000-7f71c7ff6000 r-xp 00001000 fd:01 1586742 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 148 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 148 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 148 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 148 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f71c7ff6000-7f71c8000000 r--p 00026000 fd:01 1586742 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 40 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 40 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 40 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 40 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f71c8000000-7f71c8002000 r--p 00030000 fd:01 1586742 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f71c8002000-7f71c8004000 rw-p 00032000 fd:01 1586742 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Pss_Dirty: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7fff31879000-7fff3189a000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Pss_Dirty: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me gd ac
+7fff3191c000-7fff31920000 r--p 00000000 00:00 0 [vvar]
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr pf io de dd sd
+7fff31920000-7fff31922000 r-xp 00000000 00:00 0 [vdso]
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Pss_Dirty: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me de sd
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/cpuset.cpus.effective b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/cpuset.cpus.effective
new file mode 100755
index 0000000000..74fc2fb6b0
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/cpuset.cpus.effective
@@ -0,0 +1 @@
+0-7
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/io.stat b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/io.stat
new file mode 100755
index 0000000000..04f98388f4
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/io.stat
@@ -0,0 +1,8 @@
+253:2 rbytes=2110803968 wbytes=8333664256 rios=515333 wios=1682507 dbytes=0 dios=0
+8:32 rbytes=50547765248 wbytes=0 rios=12340763 wios=0 dbytes=0 dios=0
+253:16 rbytes=50547765248 wbytes=3666890752 rios=12340763 wios=566510 dbytes=0 dios=0
+253:1 rbytes=9051578368 wbytes=3648737280 rios=879731 wios=167625 dbytes=0 dios=0
+8:16 rbytes=21434400768 wbytes=0 rios=2586700 wios=0 dbytes=0 dios=0
+9:0 rbytes=21434400768 wbytes=0 rios=2586700 wios=1033447 dbytes=0 dios=0
+253:0 rbytes=21433970688 wbytes=107989528576 rios=2586167 wios=5402495 dbytes=0 dios=0
+253:3 rbytes=10271588352 wbytes=181110276096 rios=1191103 wios=15544929 dbytes=0 dios=0
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.max b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.max
new file mode 100755
index 0000000000..1c1d3e7c30
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.max
@@ -0,0 +1 @@
+max 100000
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.stat b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.stat
new file mode 100755
index 0000000000..ccf356414e
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.stat
@@ -0,0 +1,8 @@
+usage_usec 1055978930168
+user_usec 980146248781
+system_usec 75832681387
+nr_periods 0
+nr_throttled 0
+throttled_usec 0
+nr_bursts 0
+burst_usec 0
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current
new file mode 100755
index 0000000000..90f5f91cbf
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current
@@ -0,0 +1 @@
+12591513600
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.stat b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.stat
new file mode 100755
index 0000000000..84b90e5d0d
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.stat
@@ -0,0 +1,51 @@
+anon 9158508544
+file 2762801152
+kernel 503017472
+kernel_stack 27049984
+pagetables 149635072
+sec_pagetables 0
+percpu 58040
+sock 217088
+vmalloc 630784
+shmem 2040651776
+zswap 0
+zswapped 0
+file_mapped 445124608
+file_dirty 7008256
+file_writeback 0
+swapcached 170151936
+anon_thp 981467136
+file_thp 0
+shmem_thp 0
+inactive_anon 6160973824
+active_anon 4963110912
+inactive_file 213557248
+active_file 508547072
+unevictable 240934912
+slab_reclaimable 227201576
+slab_unreclaimable 94041680
+slab 321243256
+workingset_refault_anon 496572
+workingset_refault_file 2613659
+workingset_activate_anon 61432
+workingset_activate_file 1430266
+workingset_restore_anon 5935
+workingset_restore_file 922840
+workingset_nodereclaim 0
+pgscan 18707280
+pgsteal 10023314
+pgscan_kswapd 14949081
+pgscan_direct 3758199
+pgsteal_kswapd 8515423
+pgsteal_direct 1507891
+pgfault 5724466729
+pgmajfault 271316
+pgrefill 5283337
+pgactivate 130257374
+pgdeactivate 3808695
+pglazyfree 0
+pglazyfreed 0
+zswpin 0
+zswpout 0
+thp_fault_alloc 102655
+thp_collapse_alloc 5073
diff --git a/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current
new file mode 100755
index 0000000000..dd476bae9c
--- /dev/null
+++ b/lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current
@@ -0,0 +1 @@
+3554775040
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/2523/cgroup b/lib/crunchstat/testdata/ubuntu1804/proc/2523/cgroup
new file mode 100755
index 0000000000..a56b7e2330
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/2523/cgroup
@@ -0,0 +1,13 @@
+12:freezer:/
+11:rdma:/
+10:devices:/user.slice
+9:blkio:/user.slice
+8:net_cls,net_prio:/
+7:cpu,cpuacct:/user.slice
+6:memory:/user.slice
+5:cpuset:/
+4:perf_event:/
+3:pids:/user.slice/user-1000.slice/session-1.scope
+2:hugetlb:/
+1:name=systemd:/user.slice/user-1000.slice/session-1.scope
+0::/user.slice/user-1000.slice/session-1.scope
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/2523/cpuset b/lib/crunchstat/testdata/ubuntu1804/proc/2523/cpuset
new file mode 100755
index 0000000000..b498fd495d
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/2523/cpuset
@@ -0,0 +1 @@
+/
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/2523/net/dev b/lib/crunchstat/testdata/ubuntu1804/proc/2523/net/dev
new file mode 100755
index 0000000000..d2e7d37656
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/2523/net/dev
@@ -0,0 +1,4 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 8492 102 0 0 0 0 0 0 8492 102 0 0 0 0 0 0
+enp1s0: 392046996 307389 0 31358 0 0 0 0 2402023 32125 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/cpuinfo b/lib/crunchstat/testdata/ubuntu1804/proc/cpuinfo
new file mode 100755
index 0000000000..8cae8296b5
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/cpuinfo
@@ -0,0 +1,54 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 0
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 1
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/mounts b/lib/crunchstat/testdata/ubuntu1804/proc/mounts
new file mode 100755
index 0000000000..17d7f08f45
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/mounts
@@ -0,0 +1,34 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,nosuid,relatime,size=986344k,nr_inodes=246586,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=204064k,mode=755 0 0
+/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime,data=ordered 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
+tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
+cgroup /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime 0 0
+cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
+cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
+cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
+cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
+cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
+cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
+cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
+cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
+cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
+cgroup /sys/fs/cgroup/rdma cgroup rw,nosuid,nodev,noexec,relatime,rdma 0 0
+cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+mqueue /dev/mqueue mqueue rw,relatime 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=38,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12761 0 0
+debugfs /sys/kernel/debug debugfs rw,relatime 0 0
+configfs /sys/kernel/config configfs rw,relatime 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
+binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0
+/dev/vda2 /boot ext4 rw,relatime,data=ordered 0 0
+lxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=204060k,mode=700,uid=1000,gid=1000 0 0
diff --git a/lib/crunchstat/testdata/ubuntu1804/proc/self/smaps b/lib/crunchstat/testdata/ubuntu1804/proc/self/smaps
new file mode 100755
index 0000000000..59f868841f
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/proc/self/smaps
@@ -0,0 +1,1848 @@
+00400000-00403000 r--p 00000000 fd:00 135685 /tmp/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me dw sd
+00403000-01776000 r-xp 00003000 fd:00 135685 /tmp/arvados-server
+Size: 19916 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12492 kB
+Pss: 12492 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12492 kB
+Referenced: 12492 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me dw sd
+01776000-02f28000 r--p 01376000 fd:00 135685 /tmp/arvados-server
+Size: 24264 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 11368 kB
+Pss: 11368 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 11368 kB
+Referenced: 11368 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me dw sd
+02f28000-02f29000 r--p 02b27000 fd:00 135685 /tmp/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me dw ac sd
+02f29000-02fc0000 rw-p 02b28000 fd:00 135685 /tmp/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 480 kB
+Pss: 480 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 480 kB
+Referenced: 480 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me dw ac sd
+02fc0000-03007000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 100 kB
+Pss: 100 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 100 kB
+Referenced: 100 kB
+Anonymous: 100 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+03d38000-03d59000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000800000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 5996 kB
+Pss: 5996 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 5996 kB
+Referenced: 5996 kB
+Anonymous: 5996 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+c000800000-c004000000 ---p 00000000 00:00 0
+Size: 57344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f7580000000-7f7580021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f7580021000-7f7584000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f7584000000-7f7584021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f7584021000-7f7588000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f7588000000-7f7588021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f7588021000-7f758c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f758c000000-7f758c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f758c021000-7f7590000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f7590000000-7f7590021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f7590021000-7f7594000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f7597cff000-7f7597d00000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f7597d00000-7f7598500000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f7599ffc000-7f7599ffd000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f7599ffd000-7f759a7fd000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f759a7fd000-7f759a7fe000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f759a7fe000-7f759affe000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f759affe000-7f759afff000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f759afff000-7f759b7ff000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f759b7ff000-7f759b800000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f759b800000-7f759c000000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f759c000000-7f759c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me nr sd
+7f759c021000-7f75a0000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me nr sd
+7f75a0260000-7f75a0500000 rw-p 00000000 00:00 0
+Size: 2688 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 1300 kB
+Pss: 1300 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 1300 kB
+Referenced: 1300 kB
+Anonymous: 1300 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75a0500000-7f75a0501000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75a0501000-7f75a3000000 rw-p 00000000 00:00 0
+Size: 44028 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 136 kB
+Pss: 136 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 136 kB
+Referenced: 136 kB
+Anonymous: 136 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75a3000000-7f75a3200000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd hg
+7f75a3200000-7f75a331d000 rw-p 00000000 00:00 0
+Size: 1140 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75a331d000-7f75b3896000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75b3896000-7f75b3897000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75b3897000-7f75c5746000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c5746000-7f75c5747000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c5747000-7f75c7b1c000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c7b1c000-7f75c7b1d000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c7b1d000-7f75c7f16000 ---p 00000000 00:00 0
+Size: 4068 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c7f16000-7f75c7f1a000 r-xp 00000000 fd:00 132041 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 1 kB
+Shared_Clean: 16 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 16 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c7f1a000-7f75c8119000 ---p 00004000 fd:00 132041 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 2044 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c8119000-7f75c811a000 r--p 00003000 fd:00 132041 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c811a000-7f75c811b000 rw-p 00004000 fd:00 132041 /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c811b000-7f75c811e000 r-xp 00000000 fd:00 136252 /lib/x86_64-linux-gnu/libdl-2.27.so
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c811e000-7f75c831d000 ---p 00003000 fd:00 136252 /lib/x86_64-linux-gnu/libdl-2.27.so
+Size: 2044 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c831d000-7f75c831e000 r--p 00002000 fd:00 136252 /lib/x86_64-linux-gnu/libdl-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c831e000-7f75c831f000 rw-p 00003000 fd:00 136252 /lib/x86_64-linux-gnu/libdl-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c831f000-7f75c833c000 r-xp 00000000 fd:00 132036 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 116 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 60 kB
+Pss: 6 kB
+Shared_Clean: 60 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 60 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c833c000-7f75c853c000 ---p 0001d000 fd:00 132036 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c853c000-7f75c853d000 r--p 0001d000 fd:00 132036 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c853d000-7f75c853e000 rw-p 0001e000 fd:00 132036 /lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c853e000-7f75c8548000 rw-p 00000000 00:00 0
+Size: 40 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8548000-7f75c872f000 r-xp 00000000 fd:00 136249 /lib/x86_64-linux-gnu/libc-2.27.so
+Size: 1948 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 1040 kB
+Pss: 41 kB
+Shared_Clean: 1040 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 1040 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c872f000-7f75c892f000 ---p 001e7000 fd:00 136249 /lib/x86_64-linux-gnu/libc-2.27.so
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c892f000-7f75c8933000 r--p 001e7000 fd:00 136249 /lib/x86_64-linux-gnu/libc-2.27.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c8933000-7f75c8935000 rw-p 001eb000 fd:00 136249 /lib/x86_64-linux-gnu/libc-2.27.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8935000-7f75c8939000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 12 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8939000-7f75c8946000 r-xp 00000000 fd:00 131482 /lib/x86_64-linux-gnu/libpam.so.0.83.1
+Size: 52 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 52 kB
+Pss: 6 kB
+Shared_Clean: 52 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 52 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c8946000-7f75c8b45000 ---p 0000d000 fd:00 131482 /lib/x86_64-linux-gnu/libpam.so.0.83.1
+Size: 2044 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c8b45000-7f75c8b46000 r--p 0000c000 fd:00 131482 /lib/x86_64-linux-gnu/libpam.so.0.83.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c8b46000-7f75c8b47000 rw-p 0000d000 fd:00 131482 /lib/x86_64-linux-gnu/libpam.so.0.83.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8b47000-7f75c8b61000 r-xp 00000000 fd:00 136264 /lib/x86_64-linux-gnu/libpthread-2.27.so
+Size: 104 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 100 kB
+Pss: 4 kB
+Shared_Clean: 100 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 100 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c8b61000-7f75c8d60000 ---p 0001a000 fd:00 136264 /lib/x86_64-linux-gnu/libpthread-2.27.so
+Size: 2044 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c8d60000-7f75c8d61000 r--p 00019000 fd:00 136264 /lib/x86_64-linux-gnu/libpthread-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c8d61000-7f75c8d62000 rw-p 0001a000 fd:00 136264 /lib/x86_64-linux-gnu/libpthread-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8d62000-7f75c8d66000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8d66000-7f75c8d7d000 r-xp 00000000 fd:00 136265 /lib/x86_64-linux-gnu/libresolv-2.27.so
+Size: 92 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 60 kB
+Pss: 8 kB
+Shared_Clean: 60 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 60 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me sd
+7f75c8d7d000-7f75c8f7c000 ---p 00017000 fd:00 136265 /lib/x86_64-linux-gnu/libresolv-2.27.so
+Size: 2044 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c8f7c000-7f75c8f7d000 r--p 00016000 fd:00 136265 /lib/x86_64-linux-gnu/libresolv-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me ac sd
+7f75c8f7d000-7f75c8f7e000 rw-p 00017000 fd:00 136265 /lib/x86_64-linux-gnu/libresolv-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8f7e000-7f75c8f80000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c8f80000-7f75c8fa9000 r-xp 00000000 fd:00 135731 /lib/x86_64-linux-gnu/ld-2.27.so
+Size: 164 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 164 kB
+Pss: 6 kB
+Shared_Clean: 164 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 164 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me dw sd
+7f75c8fbc000-7f75c903c000 rw-p 00000000 00:00 0
+Size: 512 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 272 kB
+Pss: 272 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 272 kB
+Referenced: 272 kB
+Anonymous: 272 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c903c000-7f75c90bc000 ---p 00000000 00:00 0
+Size: 512 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c90bc000-7f75c90bd000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c90bd000-7f75c913c000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: mr mw me sd
+7f75c913c000-7f75c91a3000 rw-p 00000000 00:00 0
+Size: 412 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 68 kB
+Pss: 68 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 68 kB
+Referenced: 68 kB
+Anonymous: 68 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7f75c91a9000-7f75c91aa000 r--p 00029000 fd:00 135731 /lib/x86_64-linux-gnu/ld-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr mw me dw ac sd
+7f75c91aa000-7f75c91ab000 rw-p 0002a000 fd:00 135731 /lib/x86_64-linux-gnu/ld-2.27.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me dw ac sd
+7f75c91ab000-7f75c91ac000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me ac sd
+7ffdc0ff9000-7ffdc101a000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd wr mr mw me gd ac
+7ffdc1151000-7ffdc1154000 r--p 00000000 00:00 0 [vvar]
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd mr pf io de dd sd
+7ffdc1154000-7ffdc1156000 r-xp 00000000 00:00 0 [vdso]
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex mr mw me de sd
+ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+VmFlags: rd ex
diff --git a/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes
new file mode 100755
index 0000000000..77ad60d0ac
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes
@@ -0,0 +1,11 @@
+252:0 Read 6119424
+252:0 Write 0
+252:0 Sync 6119424
+252:0 Async 0
+252:0 Total 6119424
+253:0 Read 6119424
+253:0 Write 0
+253:0 Sync 6119424
+253:0 Async 0
+253:0 Total 6119424
+Total 12238848
diff --git a/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat
new file mode 100755
index 0000000000..5bb0142e13
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat
@@ -0,0 +1,2 @@
+user 243
+system 255
diff --git a/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpuset/cpuset.cpus b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpuset/cpuset.cpus
new file mode 100755
index 0000000000..8b0fab869c
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpuset/cpuset.cpus
@@ -0,0 +1 @@
+0-1
diff --git a/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/memory/user.slice/memory.stat b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/memory/user.slice/memory.stat
new file mode 100755
index 0000000000..dc60bbb158
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/memory/user.slice/memory.stat
@@ -0,0 +1,33 @@
+cache 55107584
+rss 14348288
+rss_huge 0
+shmem 0
+mapped_file 25276416
+dirty 45821952
+writeback 0
+pgpgin 61677
+pgpgout 44641
+pgfault 85734
+pgmajfault 66
+inactive_anon 0
+active_anon 14536704
+inactive_file 25812992
+active_file 29433856
+unevictable 0
+hierarchical_memory_limit 9223372036854771712
+total_cache 55107584
+total_rss 14348288
+total_rss_huge 0
+total_shmem 0
+total_mapped_file 25276416
+total_dirty 45821952
+total_writeback 0
+total_pgpgin 61677
+total_pgpgout 44641
+total_pgfault 85734
+total_pgmajfault 66
+total_inactive_anon 0
+total_active_anon 14536704
+total_inactive_file 25812992
+total_active_file 29433856
+total_unevictable 0
diff --git a/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-1.scope/cpu.stat b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-1.scope/cpu.stat
new file mode 100755
index 0000000000..6d71376132
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-1.scope/cpu.stat
@@ -0,0 +1,3 @@
+usage_usec 4947324
+user_usec 2409841
+system_usec 2537483
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/1360/cgroup b/lib/crunchstat/testdata/ubuntu2004/proc/1360/cgroup
new file mode 100755
index 0000000000..a4a34a433c
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/1360/cgroup
@@ -0,0 +1,13 @@
+12:net_cls,net_prio:/
+11:pids:/user.slice/user-1000.slice/session-2.scope
+10:hugetlb:/
+9:cpuset:/
+8:perf_event:/
+7:cpu,cpuacct:/user.slice
+6:devices:/user.slice
+5:rdma:/
+4:blkio:/user.slice
+3:memory:/user.slice/user-1000.slice/session-2.scope
+2:freezer:/
+1:name=systemd:/user.slice/user-1000.slice/session-2.scope
+0::/user.slice/user-1000.slice/session-2.scope
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/1360/cpuset b/lib/crunchstat/testdata/ubuntu2004/proc/1360/cpuset
new file mode 100755
index 0000000000..b498fd495d
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/1360/cpuset
@@ -0,0 +1 @@
+/
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/1360/net/dev b/lib/crunchstat/testdata/ubuntu2004/proc/1360/net/dev
new file mode 100755
index 0000000000..320a0e898c
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/1360/net/dev
@@ -0,0 +1,4 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 7232 92 0 0 0 0 0 0 7232 92 0 0 0 0 0 0
+enp1s0: 48329280 34878 0 1282 0 0 0 0 257876 3434 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/cpuinfo b/lib/crunchstat/testdata/ubuntu2004/proc/cpuinfo
new file mode 100755
index 0000000000..f212206325
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/cpuinfo
@@ -0,0 +1,54 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 0
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 1
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/mounts b/lib/crunchstat/testdata/ubuntu2004/proc/mounts
new file mode 100755
index 0000000000..6e4a3f222d
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/mounts
@@ -0,0 +1,42 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,nosuid,noexec,relatime,size=1960772k,nr_inodes=490193,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=401380k,mode=755 0 0
+/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
+tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
+cgroup2 /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0
+cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+none /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
+cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
+cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
+cgroup /sys/fs/cgroup/rdma cgroup rw,nosuid,nodev,noexec,relatime,rdma 0 0
+cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
+cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
+cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
+cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
+cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
+cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
+cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=28,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=16350 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
+debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
+tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
+configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
+binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
+/dev/loop1 /snap/core20/1974 squashfs ro,nodev,relatime 0 0
+/dev/loop0 /snap/lxd/24061 squashfs ro,nodev,relatime 0 0
+/dev/loop3 /snap/core20/1828 squashfs ro,nodev,relatime 0 0
+/dev/loop2 /snap/snapd/19457 squashfs ro,nodev,relatime 0 0
+/dev/loop4 /snap/snapd/18357 squashfs ro,nodev,relatime 0 0
+/dev/vda2 /boot ext4 rw,relatime 0 0
+tmpfs /run/snapd/ns tmpfs rw,nosuid,nodev,noexec,relatime,size=401380k,mode=755 0 0
+nsfs /run/snapd/ns/lxd.mnt nsfs rw 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=401376k,mode=700,uid=1000,gid=1000 0 0
diff --git a/lib/crunchstat/testdata/ubuntu2004/proc/self/smaps b/lib/crunchstat/testdata/ubuntu2004/proc/self/smaps
new file mode 100755
index 0000000000..3ce8dba968
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/proc/self/smaps
@@ -0,0 +1,2231 @@
+00400000-00403000 r--p 00000000 fd:00 11041 /tmp/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+00403000-01776000 r-xp 00003000 fd:00 11041 /tmp/arvados-server
+Size: 19916 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12492 kB
+Pss: 12492 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12492 kB
+Referenced: 12492 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me dw sd
+01776000-02f28000 r--p 01376000 fd:00 11041 /tmp/arvados-server
+Size: 24264 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 10856 kB
+Pss: 10856 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 10856 kB
+Referenced: 10856 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+02f28000-02f29000 r--p 02b27000 fd:00 11041 /tmp/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw ac sd
+02f29000-02fc0000 rw-p 02b28000 fd:00 11041 /tmp/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 480 kB
+Pss: 480 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 480 kB
+Referenced: 480 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me dw ac sd
+02fc0000-03007000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 104 kB
+Pss: 104 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 104 kB
+Referenced: 104 kB
+Anonymous: 104 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+04a85000-04aa6000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000800000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 5756 kB
+Pss: 5756 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 5756 kB
+Referenced: 5756 kB
+Anonymous: 5756 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000800000-c004000000 ---p 00000000 00:00 0
+Size: 57344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8ce8000000-7f8ce8021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8ce8021000-7f8cec000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8cf0000000-7f8cf0021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8cf0021000-7f8cf4000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8cf4000000-7f8cf4021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8cf4021000-7f8cf8000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8cf8000000-7f8cf8021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8cf8021000-7f8cfc000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8cfc000000-7f8cfc021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8cfc021000-7f8d00000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8d00000000-7f8d00021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f8d00021000-7f8d04000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f8d05302000-7f8d05452000 rw-p 00000000 00:00 0
+Size: 1344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 1004 kB
+Pss: 1004 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 1004 kB
+Referenced: 1004 kB
+Anonymous: 1004 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d05452000-7f8d05453000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d05453000-7f8d05c63000 rw-p 00000000 00:00 0
+Size: 8256 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 20 kB
+Pss: 20 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 20 kB
+Referenced: 20 kB
+Anonymous: 20 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d05c63000-7f8d05c64000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d05c64000-7f8d06604000 rw-p 00000000 00:00 0
+Size: 9856 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 304 kB
+Pss: 304 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 304 kB
+Referenced: 304 kB
+Anonymous: 304 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d06604000-7f8d06605000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d06605000-7f8d06e45000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 240 kB
+Pss: 240 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 240 kB
+Referenced: 240 kB
+Anonymous: 240 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d06e45000-7f8d06e46000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d06e46000-7f8d07646000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d07646000-7f8d07647000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d07647000-7f8d07e47000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d07e47000-7f8d07e48000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d07e48000-7f8d0aa00000 rw-p 00000000 00:00 0
+Size: 44768 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 152 kB
+Pss: 152 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 152 kB
+Referenced: 152 kB
+Anonymous: 152 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d0aa00000-7f8d0ac00000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd hg
+7f8d0ac00000-7f8d0ac84000 rw-p 00000000 00:00 0
+Size: 528 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d0ac84000-7f8d1b1fd000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d1b1fd000-7f8d1b1fe000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d1b1fe000-7f8d2d0ad000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d2d0ad000-7f8d2d0ae000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2d0ae000-7f8d2f483000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d2f483000-7f8d2f484000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f484000-7f8d2f8fd000 ---p 00000000 00:00 0
+Size: 4580 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d2f8fd000-7f8d2f8fe000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f8fe000-7f8d2f97d000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d2f97d000-7f8d2f9e0000 rw-p 00000000 00:00 0
+Size: 396 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 56 kB
+Pss: 56 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 56 kB
+Referenced: 56 kB
+Anonymous: 56 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f9e0000-7f8d2f9e2000 r--p 00000000 fd:00 12252 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2f9e2000-7f8d2f9e5000 r-xp 00002000 fd:00 12252 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2f9e5000-7f8d2f9e6000 r--p 00005000 fd:00 12252 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2f9e6000-7f8d2f9e7000 r--p 00005000 fd:00 12252 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2f9e7000-7f8d2f9e8000 rw-p 00006000 fd:00 12252 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f9e8000-7f8d2f9ea000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f9ea000-7f8d2f9eb000 r--p 00000000 fd:00 12268 /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2f9eb000-7f8d2f9ed000 r-xp 00001000 fd:00 12268 /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2f9ed000-7f8d2f9ee000 r--p 00003000 fd:00 12268 /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2f9ee000-7f8d2f9ef000 r--p 00003000 fd:00 12268 /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2f9ef000-7f8d2f9f0000 rw-p 00004000 fd:00 12268 /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2f9f0000-7f8d2f9f3000 r--p 00000000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2f9f3000-7f8d2f9fb000 r-xp 00003000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 28 kB
+Pss: 1 kB
+Shared_Clean: 28 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 28 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2f9fb000-7f8d2fa0f000 r--p 0000b000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 80 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fa0f000-7f8d2fa10000 ---p 0001f000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f8d2fa10000-7f8d2fa11000 r--p 0001f000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2fa11000-7f8d2fa12000 rw-p 00020000 fd:00 12234 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fa12000-7f8d2fa1c000 rw-p 00000000 00:00 0
+Size: 40 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fa1c000-7f8d2fa3e000 r--p 00000000 fd:00 12250 /usr/lib/x86_64-linux-gnu/libc-2.31.so
+Size: 136 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 136 kB
+Pss: 4 kB
+Shared_Clean: 136 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 136 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fa3e000-7f8d2fbb6000 r-xp 00022000 fd:00 12250 /usr/lib/x86_64-linux-gnu/libc-2.31.so
+Size: 1504 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 768 kB
+Pss: 28 kB
+Shared_Clean: 768 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 768 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2fbb6000-7f8d2fc04000 r--p 0019a000 fd:00 12250 /usr/lib/x86_64-linux-gnu/libc-2.31.so
+Size: 312 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 4 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc04000-7f8d2fc08000 r--p 001e7000 fd:00 12250 /usr/lib/x86_64-linux-gnu/libc-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2fc08000-7f8d2fc0a000 rw-p 001eb000 fd:00 12250 /usr/lib/x86_64-linux-gnu/libc-2.31.so
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc0a000-7f8d2fc0e000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 12 kB
+Referenced: 12 kB
+Anonymous: 12 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc0e000-7f8d2fc11000 r--p 00000000 fd:00 12406 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc11000-7f8d2fc1a000 r-xp 00003000 fd:00 12406 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 36 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 36 kB
+Pss: 2 kB
+Shared_Clean: 36 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 36 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2fc1a000-7f8d2fc1e000 r--p 0000c000 fd:00 12406 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc1e000-7f8d2fc1f000 r--p 0000f000 fd:00 12406 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2fc1f000-7f8d2fc20000 rw-p 00010000 fd:00 12406 /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc20000-7f8d2fc26000 r--p 00000000 fd:00 12434 /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 24 kB
+Pss: 0 kB
+Shared_Clean: 24 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 24 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc26000-7f8d2fc37000 r-xp 00006000 fd:00 12434 /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 68 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 68 kB
+Pss: 2 kB
+Shared_Clean: 68 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 68 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2fc37000-7f8d2fc3d000 r--p 00017000 fd:00 12434 /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 24 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc3d000-7f8d2fc3e000 r--p 0001c000 fd:00 12434 /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2fc3e000-7f8d2fc3f000 rw-p 0001d000 fd:00 12434 /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc3f000-7f8d2fc43000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc43000-7f8d2fc47000 r--p 00000000 fd:00 12439 /usr/lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 1 kB
+Shared_Clean: 16 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 16 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc47000-7f8d2fc57000 r-xp 00004000 fd:00 12439 /usr/lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 64 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 64 kB
+Pss: 6 kB
+Shared_Clean: 64 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 64 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f8d2fc57000-7f8d2fc5b000 r--p 00014000 fd:00 12439 /usr/lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f8d2fc5b000-7f8d2fc5c000 r--p 00017000 fd:00 12439 /usr/lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f8d2fc5c000-7f8d2fc5d000 rw-p 00018000 fd:00 12439 /usr/lib/x86_64-linux-gnu/libresolv-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc5d000-7f8d2fc61000 rw-p 00000000 00:00 0
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f8d2fc68000-7f8d2fc69000 r--p 00000000 fd:00 12105 /usr/lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+7f8d2fc69000-7f8d2fc8c000 r-xp 00001000 fd:00 12105 /usr/lib/x86_64-linux-gnu/ld-2.31.so
+Size: 140 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 140 kB
+Pss: 4 kB
+Shared_Clean: 140 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 140 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me dw sd
+7f8d2fc8c000-7f8d2fc94000 r--p 00024000 fd:00 12105 /usr/lib/x86_64-linux-gnu/ld-2.31.so
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 1 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw sd
+7f8d2fc95000-7f8d2fc96000 r--p 0002c000 fd:00 12105 /usr/lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me dw ac sd
+7f8d2fc96000-7f8d2fc97000 rw-p 0002d000 fd:00 12105 /usr/lib/x86_64-linux-gnu/ld-2.31.so
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me dw ac sd
+7f8d2fc97000-7f8d2fc98000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7ffe58cef000-7ffe58d10000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me gd ac
+7ffe58d36000-7ffe58d39000 r--p 00000000 00:00 0 [vvar]
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr pf io de dd sd
+7ffe58d39000-7ffe58d3a000 r-xp 00000000 00:00 0 [vdso]
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me de sd
+ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: ex
diff --git a/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes
new file mode 100755
index 0000000000..6aa2cc4a7f
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes
@@ -0,0 +1,13 @@
+252:0 Read 2322432
+252:0 Write 0
+252:0 Sync 2322432
+252:0 Async 0
+252:0 Discard 0
+252:0 Total 2322432
+253:0 Read 2322432
+253:0 Write 0
+253:0 Sync 2322432
+253:0 Async 0
+253:0 Discard 0
+253:0 Total 2322432
+Total 4644864
diff --git a/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat
new file mode 100755
index 0000000000..3aac4cc62e
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat
@@ -0,0 +1,2 @@
+user 31
+system 40
diff --git a/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpuset/cpuset.cpus b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpuset/cpuset.cpus
new file mode 100755
index 0000000000..8b0fab869c
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpuset/cpuset.cpus
@@ -0,0 +1 @@
+0-1
diff --git a/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/memory/user.slice/user-1000.slice/session-2.scope/memory.stat b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/memory/user.slice/user-1000.slice/session-2.scope/memory.stat
new file mode 100755
index 0000000000..d31dffc77b
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/memory/user.slice/user-1000.slice/session-2.scope/memory.stat
@@ -0,0 +1,33 @@
+cache 47984640
+rss 12845056
+rss_huge 0
+shmem 0
+mapped_file 24870912
+dirty 45821952
+writeback 0
+pgpgin 25839
+pgpgout 10933
+pgfault 18513
+pgmajfault 0
+inactive_anon 0
+active_anon 12840960
+inactive_file 47579136
+active_file 270336
+unevictable 0
+hierarchical_memory_limit 9223372036854771712
+total_cache 47984640
+total_rss 12845056
+total_rss_huge 0
+total_shmem 0
+total_mapped_file 24870912
+total_dirty 45821952
+total_writeback 0
+total_pgpgin 25839
+total_pgpgout 10933
+total_pgfault 18513
+total_pgmajfault 0
+total_inactive_anon 0
+total_active_anon 12840960
+total_inactive_file 47579136
+total_active_file 270336
+total_unevictable 0
diff --git a/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-2.scope/cpu.stat b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-2.scope/cpu.stat
new file mode 100755
index 0000000000..25fa4a7124
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-2.scope/cpu.stat
@@ -0,0 +1,3 @@
+usage_usec 843527
+user_usec 355576
+system_usec 487951
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/1967/cgroup b/lib/crunchstat/testdata/ubuntu2204/proc/1967/cgroup
new file mode 100755
index 0000000000..24c88e805a
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/1967/cgroup
@@ -0,0 +1 @@
+0::/user.slice/user-1000.slice/session-1.scope
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/1967/cpuset b/lib/crunchstat/testdata/ubuntu2204/proc/1967/cpuset
new file mode 100755
index 0000000000..fb6c61a862
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/1967/cpuset
@@ -0,0 +1 @@
+/user.slice
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/1967/net/dev b/lib/crunchstat/testdata/ubuntu2204/proc/1967/net/dev
new file mode 100755
index 0000000000..405de33020
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/1967/net/dev
@@ -0,0 +1,4 @@
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 10505 124 0 0 0 0 0 0 10505 124 0 0 0 0 0 0
+enp1s0: 227109019 173999 0 30971 0 0 0 0 1938868 25576 0 0 0 0 0 0
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/cpuinfo b/lib/crunchstat/testdata/ubuntu2204/proc/cpuinfo
new file mode 100755
index 0000000000..c482b0568b
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/cpuinfo
@@ -0,0 +1,56 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 0
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 71
+model name : Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz
+stepping : 1
+microcode : 0x13
+cpu MHz : 3292.388
+cache size : 16384 KB
+physical id : 1
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 20
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities
+vmx flags : vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown
+bogomips : 6584.77
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/mounts b/lib/crunchstat/testdata/ubuntu2204/proc/mounts
new file mode 100755
index 0000000000..f98f16199f
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/mounts
@@ -0,0 +1,30 @@
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,nosuid,relatime,size=1944524k,nr_inodes=486131,mode=755,inode64 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=400584k,mode=755,inode64 0 0
+/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,nosuid,nodev,inode64 0 0
+tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,inode64 0 0
+cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0
+pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
+bpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=29,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=17759 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
+mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
+debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
+tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
+configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
+none /run/credentials/systemd-sysusers.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0
+/dev/loop0 /snap/lxd/24322 squashfs ro,nodev,relatime,errors=continue 0 0
+/dev/loop1 /snap/snapd/18357 squashfs ro,nodev,relatime,errors=continue 0 0
+/dev/loop2 /snap/core20/1822 squashfs ro,nodev,relatime,errors=continue 0 0
+/dev/vda2 /boot ext4 rw,relatime 0 0
+binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /run/snapd/ns tmpfs rw,nosuid,nodev,noexec,relatime,size=400584k,mode=755,inode64 0 0
+nsfs /run/snapd/ns/lxd.mnt nsfs rw 0 0
+tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=400580k,nr_inodes=100145,mode=700,uid=1000,gid=1000,inode64 0 0
+/dev/loop3 /snap/snapd/19457 squashfs ro,nodev,relatime,errors=continue 0 0
+/dev/loop4 /snap/core20/1974 squashfs ro,nodev,relatime,errors=continue 0 0
diff --git a/lib/crunchstat/testdata/ubuntu2204/proc/self/smaps b/lib/crunchstat/testdata/ubuntu2204/proc/self/smaps
new file mode 100755
index 0000000000..104eef148f
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/proc/self/smaps
@@ -0,0 +1,1978 @@
+00400000-00403000 r--p 00000000 fd:00 393261 /tmp/arvados-server
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 12 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+00403000-01776000 r-xp 00003000 fd:00 393261 /tmp/arvados-server
+Size: 19916 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12492 kB
+Pss: 12492 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 12492 kB
+Private_Dirty: 0 kB
+Referenced: 12492 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+01776000-02f28000 r--p 01376000 fd:00 393261 /tmp/arvados-server
+Size: 24264 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 11048 kB
+Pss: 11048 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 11048 kB
+Private_Dirty: 0 kB
+Referenced: 11048 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+02f28000-02f29000 r--p 02b27000 fd:00 393261 /tmp/arvados-server
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+02f29000-02fc0000 rw-p 02b28000 fd:00 393261 /tmp/arvados-server
+Size: 604 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 480 kB
+Pss: 480 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 304 kB
+Private_Dirty: 176 kB
+Referenced: 480 kB
+Anonymous: 176 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+02fc0000-03007000 rw-p 00000000 00:00 0
+Size: 284 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 100 kB
+Pss: 100 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 100 kB
+Referenced: 100 kB
+Anonymous: 100 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+03f05000-03f26000 rw-p 00000000 00:00 0 [heap]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000000000-c000800000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 5500 kB
+Pss: 5500 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 5500 kB
+Referenced: 5500 kB
+Anonymous: 5500 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+c000800000-c004000000 ---p 00000000 00:00 0
+Size: 57344 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f750c000000-7f750c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f750c021000-7f7510000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f7510000000-7f7510021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7510021000-7f7514000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f7514000000-7f7514021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7514021000-7f7518000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f7518000000-7f7518021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f7518021000-7f751c000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f751c000000-7f751c021000 rw-p 00000000 00:00 0
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me nr sd
+7f751c021000-7f7520000000 ---p 00000000 00:00 0
+Size: 65404 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me nr sd
+7f7520f2f000-7f752108f000 rw-p 00000000 00:00 0
+Size: 1408 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 992 kB
+Pss: 992 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 992 kB
+Referenced: 992 kB
+Anonymous: 992 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f752108f000-7f7521090000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7521090000-7f7521a30000 rw-p 00000000 00:00 0
+Size: 9856 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 332 kB
+Pss: 332 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 332 kB
+Referenced: 332 kB
+Anonymous: 332 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7521a30000-7f7521a31000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7521a31000-7f7522271000 rw-p 00000000 00:00 0
+Size: 8448 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 244 kB
+Pss: 244 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 244 kB
+Referenced: 244 kB
+Anonymous: 244 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7522271000-7f7522272000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7522272000-7f7522a72000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7522a72000-7f7522a73000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7522a73000-7f7523273000 rw-p 00000000 00:00 0
+Size: 8192 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7523273000-7f7523274000 ---p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7523274000-7f7525e00000 rw-p 00000000 00:00 0
+Size: 44592 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 156 kB
+Pss: 156 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 156 kB
+Referenced: 156 kB
+Anonymous: 156 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f7525e00000-7f7526000000 rw-p 00000000 00:00 0
+Size: 2048 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 1
+VmFlags: rd wr mr mw me ac sd hg
+7f7526000000-7f75260b0000 rw-p 00000000 00:00 0
+Size: 704 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f75260b0000-7f7536629000 ---p 00000000 00:00 0
+Size: 267748 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f7536629000-7f753662a000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f753662a000-7f75484d9000 ---p 00000000 00:00 0
+Size: 293564 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f75484d9000-7f75484da000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f75484da000-7f754a8af000 ---p 00000000 00:00 0
+Size: 36692 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f754a8af000-7f754a8b0000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754a8b0000-7f754ad29000 ---p 00000000 00:00 0
+Size: 4580 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f754ad29000-7f754ad2a000 rw-p 00000000 00:00 0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ad2a000-7f754ada9000 ---p 00000000 00:00 0
+Size: 508 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: mr mw me sd
+7f754ada9000-7f754ae0c000 rw-p 00000000 00:00 0
+Size: 396 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 56 kB
+Pss: 56 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 56 kB
+Referenced: 56 kB
+Anonymous: 56 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ae0c000-7f754ae0e000 r--p 00000000 fd:00 11091 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754ae0e000-7f754ae11000 r-xp 00002000 fd:00 11091 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754ae11000-7f754ae12000 r--p 00005000 fd:00 11091 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754ae12000-7f754ae13000 r--p 00005000 fd:00 11091 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754ae13000-7f754ae14000 rw-p 00006000 fd:00 11091 /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ae14000-7f754ae16000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ae16000-7f754ae19000 r--p 00000000 fd:00 11071 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754ae19000-7f754ae21000 r-xp 00003000 fd:00 11071 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 32 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 32 kB
+Pss: 2 kB
+Shared_Clean: 32 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 32 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754ae21000-7f754ae36000 r--p 0000b000 fd:00 11071 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 84 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754ae36000-7f754ae37000 r--p 0001f000 fd:00 11071 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754ae37000-7f754ae38000 rw-p 00020000 fd:00 11071 /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ae38000-7f754ae44000 rw-p 00000000 00:00 0
+Size: 48 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754ae44000-7f754ae6c000 r--p 00000000 fd:00 11089 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 160 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 160 kB
+Pss: 6 kB
+Shared_Clean: 160 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 160 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754ae6c000-7f754b001000 r-xp 00028000 fd:00 11089 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 1620 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 912 kB
+Pss: 40 kB
+Shared_Clean: 912 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 912 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754b001000-7f754b059000 r--p 001bd000 fd:00 11089 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 352 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 128 kB
+Pss: 5 kB
+Shared_Clean: 128 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 128 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b059000-7f754b05d000 r--p 00214000 fd:00 11089 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754b05d000-7f754b05f000 rw-p 00218000 fd:00 11089 /usr/lib/x86_64-linux-gnu/libc.so.6
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b05f000-7f754b06c000 rw-p 00000000 00:00 0
+Size: 52 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 20 kB
+Pss: 20 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 20 kB
+Referenced: 20 kB
+Anonymous: 20 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b06c000-7f754b06f000 r--p 00000000 fd:00 11245 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 0 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b06f000-7f754b078000 r-xp 00003000 fd:00 11245 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 36 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 36 kB
+Pss: 3 kB
+Shared_Clean: 36 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 36 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754b078000-7f754b07c000 r--p 0000c000 fd:00 11245 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b07c000-7f754b07d000 r--p 0000f000 fd:00 11245 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754b07d000-7f754b07e000 rw-p 00010000 fd:00 11245 /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b07e000-7f754b07f000 r--p 00000000 fd:00 11272 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 2 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b07f000-7f754b080000 r-xp 00001000 fd:00 11272 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 2 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754b080000-7f754b081000 r--p 00002000 fd:00 11272 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b081000-7f754b082000 r--p 00002000 fd:00 11272 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754b082000-7f754b083000 rw-p 00003000 fd:00 11272 /usr/lib/x86_64-linux-gnu/libpthread.so.0
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b083000-7f754b086000 r--p 00000000 fd:00 11276 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 12 kB
+Pss: 3 kB
+Shared_Clean: 12 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 12 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b086000-7f754b090000 r-xp 00003000 fd:00 11276 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 40 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 36 kB
+Pss: 9 kB
+Shared_Clean: 36 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 36 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754b090000-7f754b093000 r--p 0000d000 fd:00 11276 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 12 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b093000-7f754b094000 r--p 0000f000 fd:00 11276 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754b094000-7f754b095000 rw-p 00010000 fd:00 11276 /usr/lib/x86_64-linux-gnu/libresolv.so.2
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 4 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 4 kB
+Referenced: 4 kB
+Anonymous: 4 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b095000-7f754b097000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b09c000-7f754b09e000 rw-p 00000000 00:00 0
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7f754b09e000-7f754b0a0000 r--p 00000000 fd:00 10938 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 0 kB
+Shared_Clean: 8 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 8 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b0a0000-7f754b0ca000 r-xp 00002000 fd:00 10938 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 168 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 168 kB
+Pss: 6 kB
+Shared_Clean: 168 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 168 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me sd
+7f754b0ca000-7f754b0d5000 r--p 0002c000 fd:00 10938 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 44 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 44 kB
+Pss: 1 kB
+Shared_Clean: 44 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 44 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me sd
+7f754b0d6000-7f754b0d8000 r--p 00037000 fd:00 10938 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr mw me ac sd
+7f754b0d8000-7f754b0da000 rw-p 00039000 fd:00 10938 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 8 kB
+Pss: 8 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 8 kB
+Referenced: 8 kB
+Anonymous: 8 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me ac sd
+7ffed2e14000-7ffed2e35000 rw-p 00000000 00:00 0 [stack]
+Size: 132 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 16 kB
+Pss: 16 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 16 kB
+Referenced: 16 kB
+Anonymous: 16 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd wr mr mw me gd ac
+7ffed2fc4000-7ffed2fc8000 r--p 00000000 00:00 0 [vvar]
+Size: 16 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd mr pf io de dd sd
+7ffed2fc8000-7ffed2fca000 r-xp 00000000 00:00 0 [vdso]
+Size: 8 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 4 kB
+Pss: 0 kB
+Shared_Clean: 4 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 4 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: rd ex mr mw me de sd
+ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]
+Size: 4 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+Rss: 0 kB
+Pss: 0 kB
+Shared_Clean: 0 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 0 kB
+Anonymous: 0 kB
+LazyFree: 0 kB
+AnonHugePages: 0 kB
+ShmemPmdMapped: 0 kB
+FilePmdMapped: 0 kB
+Shared_Hugetlb: 0 kB
+Private_Hugetlb: 0 kB
+Swap: 0 kB
+SwapPss: 0 kB
+Locked: 0 kB
+THPeligible: 0
+VmFlags: ex
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpu.max b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpu.max
new file mode 100755
index 0000000000..1c1d3e7c30
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpu.max
@@ -0,0 +1 @@
+max 100000
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpuset.cpus.effective b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpuset.cpus.effective
new file mode 100755
index 0000000000..8b0fab869c
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpuset.cpus.effective
@@ -0,0 +1 @@
+0-1
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/io.stat b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/io.stat
new file mode 100755
index 0000000000..97b7e1cb3b
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/io.stat
@@ -0,0 +1,2 @@
+252:0 rbytes=3551232 wbytes=147263488 rios=141 wios=208 dbytes=0 dios=0
+253:0 rbytes=3551232 wbytes=147263488 rios=141 wios=109 dbytes=0 dios=0
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/cpu.stat b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/cpu.stat
new file mode 100755
index 0000000000..cf516a6fb8
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/cpu.stat
@@ -0,0 +1,3 @@
+usage_usec 1750563
+user_usec 703305
+system_usec 1047257
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.current b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.current
new file mode 100755
index 0000000000..b779bcd240
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.current
@@ -0,0 +1 @@
+68902912
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.stat b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.stat
new file mode 100755
index 0000000000..fbf50f1b51
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.stat
@@ -0,0 +1,40 @@
+anon 13606912
+file 52432896
+kernel_stack 180224
+pagetables 438272
+percpu 0
+sock 0
+shmem 4096
+file_mapped 25767936
+file_dirty 86016
+file_writeback 0
+swapcached 0
+anon_thp 0
+file_thp 0
+shmem_thp 0
+inactive_anon 13574144
+active_anon 20480
+inactive_file 26722304
+active_file 25669632
+unevictable 0
+slab_reclaimable 1646344
+slab_unreclaimable 328072
+slab 1974416
+workingset_refault_anon 0
+workingset_refault_file 0
+workingset_activate_anon 0
+workingset_activate_file 0
+workingset_restore_anon 0
+workingset_restore_file 0
+workingset_nodereclaim 0
+pgfault 33355
+pgmajfault 27
+pgrefill 0
+pgscan 0
+pgsteal 0
+pgactivate 6253
+pgdeactivate 0
+pglazyfree 0
+pglazyfreed 0
+thp_fault_alloc 0
+thp_collapse_alloc 0
diff --git a/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.swap.current b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.swap.current
new file mode 100755
index 0000000000..573541ac97
--- /dev/null
+++ b/lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.swap.current
@@ -0,0 +1 @@
+0
diff --git a/lib/ctrlctx/auth.go b/lib/ctrlctx/auth.go
new file mode 100644
index 0000000000..31746b64cc
--- /dev/null
+++ b/lib/ctrlctx/auth.go
@@ -0,0 +1,211 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ctrlctx
+
+import (
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "database/sql"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/controller/api"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "github.com/ghodss/yaml"
+)
+
+var (
+ ErrNoAuthContext = errors.New("bug: there is no authorization in this context")
+ ErrUnauthenticated = errors.New("unauthenticated request")
+)
+
+// WrapCallsWithAuth returns a call wrapper (suitable for assigning to
+// router.router.WrapCalls) that makes CurrentUser(ctx) et al. work
+// from inside the wrapped functions.
+//
+// The incoming context must come from WrapCallsInTransactions or
+// NewWithTransaction.
+func WrapCallsWithAuth(cluster *arvados.Cluster) func(api.RoutableFunc) api.RoutableFunc {
+ var authcache authcache
+ return func(origFunc api.RoutableFunc) api.RoutableFunc {
+ return func(ctx context.Context, opts interface{}) (_ interface{}, err error) {
+ var tokens []string
+ if creds, ok := auth.FromContext(ctx); ok {
+ tokens = creds.Tokens
+ }
+ return origFunc(context.WithValue(ctx, contextKeyAuth, &authcontext{
+ authcache: &authcache,
+ cluster: cluster,
+ tokens: tokens,
+ }), opts)
+ }
+ }
+}
+
+// NewWithToken returns a context with the provided auth token.
+//
+// The incoming context must come from WrapCallsInTransactions or
+// NewWithTransaction.
+//
+// Used for attaching system auth to background threads.
+//
+// Also useful for tests, where context doesn't necessarily come from
+// a router that uses WrapCallsWithAuth.
+//
+// The returned context comes with its own token lookup cache, so
+// NewWithToken is not appropriate to use in a per-request code path.
+func NewWithToken(ctx context.Context, cluster *arvados.Cluster, token string) context.Context {
+ ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{token}})
+ return context.WithValue(ctx, contextKeyAuth, &authcontext{
+ authcache: &authcache{},
+ cluster: cluster,
+ tokens: []string{token},
+ })
+}
+
+// CurrentAuth returns the arvados.User whose privileges should be
+// used in the given context, and the arvados.APIClientAuthorization
+// the caller presented in order to authenticate the current request.
+//
+// Returns ErrUnauthenticated if the current request was not
+// authenticated (no token provided, token is expired, etc).
+func CurrentAuth(ctx context.Context) (*arvados.User, *arvados.APIClientAuthorization, error) {
+ ac, ok := ctx.Value(contextKeyAuth).(*authcontext)
+ if !ok {
+ return nil, nil, ErrNoAuthContext
+ }
+ ac.lookupOnce.Do(func() {
+ // We only validate/lookup the token once per API
+ // call, even though authcache should be efficient
+ // enough to do a lookup each time. This guarantees we
+ // always return the same result when called multiple
+ // times in the course of handling a single API call.
+ for _, token := range ac.tokens {
+ user, aca, err := ac.authcache.lookup(ctx, ac.cluster, token)
+ if err != nil {
+ ac.err = err
+ return
+ }
+ if user != nil {
+ ac.user, ac.apiClientAuthorization = user, aca
+ return
+ }
+ }
+ ac.err = ErrUnauthenticated
+ })
+ return ac.user, ac.apiClientAuthorization, ac.err
+}
+
+type contextKeyA string
+
+var contextKeyAuth = contextKeyT("auth")
+
+type authcontext struct {
+ authcache *authcache
+ cluster *arvados.Cluster
+ tokens []string
+ user *arvados.User
+ apiClientAuthorization *arvados.APIClientAuthorization
+ err error
+ lookupOnce sync.Once
+}
+
+var authcacheTTL = time.Minute
+
+type authcacheent struct {
+ expireTime time.Time
+ apiClientAuthorization arvados.APIClientAuthorization
+ user arvados.User
+}
+
+type authcache struct {
+ mtx sync.Mutex
+ entries map[string]*authcacheent
+ nextCleanup time.Time
+}
+
+// lookup returns the user and aca info for a given token. Returns nil
+// if the token is not valid. Returns a non-nil error if there was an
+// unexpected error from the database, etc.
+func (ac *authcache) lookup(ctx context.Context, cluster *arvados.Cluster, token string) (*arvados.User, *arvados.APIClientAuthorization, error) {
+ ac.mtx.Lock()
+ ent := ac.entries[token]
+ ac.mtx.Unlock()
+ if ent != nil && ent.expireTime.After(time.Now()) {
+ return &ent.user, &ent.apiClientAuthorization, nil
+ }
+ if token == "" {
+ return nil, nil, nil
+ }
+ tx, err := CurrentTx(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ var aca arvados.APIClientAuthorization
+ var user arvados.User
+
+ var cond string
+ var args []interface{}
+ if len(token) > 30 && strings.HasPrefix(token, "v2/") && token[30] == '/' {
+ fields := strings.Split(token, "/")
+ cond = `aca.uuid = $1 and aca.api_token = $2`
+ args = []interface{}{fields[1], fields[2]}
+ } else {
+ // Bare token or OIDC access token
+ mac := hmac.New(sha256.New, []byte(cluster.SystemRootToken))
+ io.WriteString(mac, token)
+ hmac := fmt.Sprintf("%x", mac.Sum(nil))
+ cond = `aca.api_token in ($1, $2)`
+ args = []interface{}{token, hmac}
+ }
+ var expiresAt sql.NullTime
+ var scopesYAML []byte
+ err = tx.QueryRowContext(ctx, `
+select aca.uuid, aca.expires_at, aca.api_token, aca.scopes, users.uuid, users.is_active, users.is_admin
+ from api_client_authorizations aca
+ left join users on aca.user_id = users.id
+ where `+cond+`
+ and (expires_at is null or expires_at > current_timestamp at time zone 'UTC')`, args...).Scan(
+ &aca.UUID, &expiresAt, &aca.APIToken, &scopesYAML,
+ &user.UUID, &user.IsActive, &user.IsAdmin)
+ if err == sql.ErrNoRows {
+ return nil, nil, nil
+ } else if err != nil {
+ return nil, nil, err
+ }
+ aca.ExpiresAt = expiresAt.Time
+ if len(scopesYAML) > 0 {
+ err = yaml.Unmarshal(scopesYAML, &aca.Scopes)
+ if err != nil {
+ return nil, nil, fmt.Errorf("loading scopes for %s: %w", aca.UUID, err)
+ }
+ }
+ ent = &authcacheent{
+ expireTime: time.Now().Add(authcacheTTL),
+ apiClientAuthorization: aca,
+ user: user,
+ }
+ ac.mtx.Lock()
+ defer ac.mtx.Unlock()
+ if ac.entries == nil {
+ ac.entries = map[string]*authcacheent{}
+ }
+ if ac.nextCleanup.IsZero() || ac.nextCleanup.Before(time.Now()) {
+ for token, ent := range ac.entries {
+ if !ent.expireTime.After(time.Now()) {
+ delete(ac.entries, token)
+ }
+ }
+ ac.nextCleanup = time.Now().Add(authcacheTTL)
+ }
+ ac.entries[token] = ent
+ return &ent.user, &ent.apiClientAuthorization, nil
+}
diff --git a/lib/ctrlctx/auth_test.go b/lib/ctrlctx/auth_test.go
new file mode 100644
index 0000000000..e6803e5c4e
--- /dev/null
+++ b/lib/ctrlctx/auth_test.go
@@ -0,0 +1,83 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ctrlctx
+
+import (
+ "context"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/jmoiron/sqlx"
+ _ "github.com/lib/pq"
+ check "gopkg.in/check.v1"
+)
+
+func (*DatabaseSuite) TestAuthContext(c *check.C) {
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ cluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+
+ getter := func(context.Context) (*sqlx.DB, error) {
+ return sqlx.Open("postgres", cluster.PostgreSQL.Connection.String())
+ }
+ authwrapper := WrapCallsWithAuth(cluster)
+ dbwrapper := WrapCallsInTransactions(getter)
+
+ // valid tokens
+ for _, token := range []string{
+ arvadostest.ActiveToken,
+ arvadostest.ActiveTokenV2,
+ arvadostest.ActiveTokenV2 + "/asdfasdfasdf",
+ arvadostest.ActiveTokenV2, // cached
+ } {
+ ok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ user, aca, err := CurrentAuth(ctx)
+ if c.Check(err, check.IsNil) {
+ c.Check(user.UUID, check.Equals, "zzzzz-tpzed-xurymjxw79nv3jz")
+ c.Check(aca.UUID, check.Equals, "zzzzz-gj3su-077z32aux8dg2s1")
+ c.Check(aca.Scopes, check.DeepEquals, []string{"all"})
+ }
+ return true, nil
+ }))(auth.NewContext(context.Background(), auth.NewCredentials(token)), "blah")
+ c.Check(ok, check.Equals, true)
+ c.Check(err, check.IsNil)
+ }
+
+ // bad tokens
+ for _, token := range []string{
+ arvadostest.ActiveToken + "X",
+ arvadostest.ActiveTokenV2 + "X",
+ arvadostest.ActiveTokenV2[:30], // "v2/{uuid}"
+ arvadostest.ActiveTokenV2[:31], // "v2/{uuid}/"
+ "bogus",
+ "",
+ } {
+ ok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ user, aca, err := CurrentAuth(ctx)
+ c.Check(err, check.Equals, ErrUnauthenticated)
+ c.Check(user, check.IsNil)
+ c.Check(aca, check.IsNil)
+ return true, err
+ }))(auth.NewContext(context.Background(), auth.NewCredentials(token)), "blah")
+ c.Check(ok, check.Equals, true)
+ c.Check(err, check.Equals, ErrUnauthenticated)
+ }
+
+ // no auth context
+ {
+ ok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {
+ user, aca, err := CurrentAuth(ctx)
+ c.Check(err, check.Equals, ErrUnauthenticated)
+ c.Check(user, check.IsNil)
+ c.Check(aca, check.IsNil)
+ return true, err
+ }))(context.Background(), "blah")
+ c.Check(ok, check.Equals, true)
+ c.Check(err, check.Equals, ErrUnauthenticated)
+ }
+}
diff --git a/lib/ctrlctx/db.go b/lib/ctrlctx/db.go
index 36d79d3d2e..d33fd8ab53 100644
--- a/lib/ctrlctx/db.go
+++ b/lib/ctrlctx/db.go
@@ -10,8 +10,10 @@ import (
"sync"
"git.arvados.org/arvados.git/lib/controller/api"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/jmoiron/sqlx"
+
// sqlx needs lib/pq to talk to PostgreSQL
_ "github.com/lib/pq"
)
@@ -67,7 +69,7 @@ type finishFunc func(*error)
// commit or rollback the transaction, if any.
//
// func example(ctx context.Context) (err error) {
-// ctx, finishtx := New(ctx, dber)
+// ctx, finishtx := New(ctx, getdb)
// defer finishtx(&err)
// // ...
// tx, err := CurrentTx(ctx)
@@ -107,6 +109,26 @@ func New(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) (co
}
}
+// NewTx starts a new transaction. The caller is responsible for
+// calling Commit or Rollback. This is suitable for database queries
+// that are separate from the API transaction (see CurrentTx), e.g.,
+// ones that will be committed even if the API call fails, or held
+// open after the API call finishes.
+func NewTx(ctx context.Context) (*sqlx.Tx, error) {
+ txn, ok := ctx.Value(contextKeyTransaction).(*transaction)
+ if !ok {
+ return nil, ErrNoTransaction
+ }
+ db, err := txn.getdb(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return db.Beginx()
+}
+
+// CurrentTx returns a transaction that will be committed after the
+// current API call completes, or rolled back if the current API call
+// returns an error.
func CurrentTx(ctx context.Context) (*sqlx.Tx, error) {
txn, ok := ctx.Value(contextKeyTransaction).(*transaction)
if !ok {
@@ -121,3 +143,45 @@ func CurrentTx(ctx context.Context) (*sqlx.Tx, error) {
})
return txn.tx, txn.err
}
+
+var errDBConnection = errors.New("database connection error")
+
+type DBConnector struct {
+ PostgreSQL arvados.PostgreSQL
+ pgdb *sqlx.DB
+ mtx sync.Mutex
+}
+
+func (dbc *DBConnector) GetDB(ctx context.Context) (*sqlx.DB, error) {
+ dbc.mtx.Lock()
+ defer dbc.mtx.Unlock()
+ if dbc.pgdb != nil {
+ return dbc.pgdb, nil
+ }
+ db, err := sqlx.Open("postgres", dbc.PostgreSQL.Connection.String())
+ if err != nil {
+ ctxlog.FromContext(ctx).WithError(err).Error("postgresql connect failed")
+ return nil, errDBConnection
+ }
+ if p := dbc.PostgreSQL.ConnectionPool; p > 0 {
+ db.SetMaxOpenConns(p)
+ }
+ if err := db.Ping(); err != nil {
+ ctxlog.FromContext(ctx).WithError(err).Error("postgresql connect succeeded but ping failed")
+ db.Close()
+ return nil, errDBConnection
+ }
+ dbc.pgdb = db
+ return db, nil
+}
+
+func (dbc *DBConnector) Close() error {
+ dbc.mtx.Lock()
+ defer dbc.mtx.Unlock()
+ var err error
+ if dbc.pgdb != nil {
+ err = dbc.pgdb.Close()
+ dbc.pgdb = nil
+ }
+ return err
+}
diff --git a/lib/diagnostics/cmd.go b/lib/diagnostics/cmd.go
index 86adc325d7..0fd3b3eca2 100644
--- a/lib/diagnostics/cmd.go
+++ b/lib/diagnostics/cmd.go
@@ -8,7 +8,9 @@ import (
"archive/tar"
"bytes"
"context"
+ "crypto/sha256"
_ "embed"
+ "encoding/json"
"flag"
"fmt"
"io"
@@ -16,12 +18,17 @@ import (
"net"
"net/http"
"net/url"
+ "os"
+ "os/exec"
+ "regexp"
"strings"
"time"
"git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/health"
"github.com/sirupsen/logrus"
)
@@ -30,16 +37,20 @@ type Command struct{}
func (Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
var diag diagnoser
f := flag.NewFlagSet(prog, flag.ContinueOnError)
- f.StringVar(&diag.projectName, "project-name", "scratch area for diagnostics", "name of project to find/create in home project and use for temporary/test objects")
- f.StringVar(&diag.logLevel, "log-level", "info", "logging level (debug, info, warning, error)")
- f.StringVar(&diag.dockerImage, "docker-image", "", "image to use when running a test container (default: use embedded hello-world image)")
+ f.StringVar(&diag.projectName, "project-name", "scratch area for diagnostics", "`name` of project to find/create in home project and use for temporary/test objects")
+ f.StringVar(&diag.logLevel, "log-level", "info", "logging `level` (debug, info, warning, error)")
+ f.StringVar(&diag.dockerImage, "docker-image", "", "`image` (tag or portable data hash) to use when running a test container, or \"hello-world\" to use embedded hello-world image (default: build a custom image containing this executable, and run diagnostics inside the container too)")
+ f.StringVar(&diag.dockerImageFrom, "docker-image-from", "debian:stable-slim", "`base` image to use when building a custom image (see https://doc.arvados.org/main/admin/diagnostics.html#container-options)")
f.BoolVar(&diag.checkInternal, "internal-client", false, "check that this host is considered an \"internal\" client")
f.BoolVar(&diag.checkExternal, "external-client", false, "check that this host is considered an \"external\" client")
+ f.BoolVar(&diag.verbose, "v", false, "verbose: include more information in report")
f.IntVar(&diag.priority, "priority", 500, "priority for test container (1..1000, or 0 to skip)")
f.DurationVar(&diag.timeout, "timeout", 10*time.Second, "timeout for http requests")
if ok, code := cmd.ParseFlags(f, prog, args, "", stderr); !ok {
return code
}
+ diag.stdout = stdout
+ diag.stderr = stderr
diag.logger = ctxlog.New(stdout, "text", diag.logLevel)
diag.logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true, PadLevelText: true})
diag.runtests()
@@ -58,22 +69,25 @@ func (Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
}
// docker save hello-world > hello-world.tar
+//
//go:embed hello-world.tar
var HelloWorldDockerImage []byte
type diagnoser struct {
- stdout io.Writer
- stderr io.Writer
- logLevel string
- priority int
- projectName string
- dockerImage string
- checkInternal bool
- checkExternal bool
- timeout time.Duration
- logger *logrus.Logger
- errors []string
- done map[int]bool
+ stdout io.Writer
+ stderr io.Writer
+ logLevel string
+ priority int
+ projectName string
+ dockerImage string
+ dockerImageFrom string
+ checkInternal bool
+ checkExternal bool
+ verbose bool
+ timeout time.Duration
+ logger *logrus.Logger
+ errors []string
+ done map[int]bool
}
func (diag *diagnoser) debugf(f string, args ...interface{}) {
@@ -84,6 +98,12 @@ func (diag *diagnoser) infof(f string, args ...interface{}) {
diag.logger.Infof(" ... "+f, args...)
}
+func (diag *diagnoser) verbosef(f string, args ...interface{}) {
+ if diag.verbose {
+ diag.logger.Infof(" ... "+f, args...)
+ }
+}
+
func (diag *diagnoser) warnf(f string, args ...interface{}) {
diag.logger.Warnf(" ... "+f, args...)
}
@@ -119,12 +139,70 @@ func (diag *diagnoser) dotest(id int, title string, fn func() error) {
func (diag *diagnoser) runtests() {
client := arvados.NewClientFromEnv()
+ // Disable auto-retry, use context instead
+ client.Timeout = 0
if client.APIHost == "" || client.AuthToken == "" {
diag.errorf("ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are not set -- aborting without running any tests")
return
}
+ hostname, err := os.Hostname()
+ if err != nil {
+ diag.warnf("error getting hostname: %s")
+ } else {
+ diag.verbosef("hostname = %s", hostname)
+ }
+
+ diag.dotest(5, "running health check (same as `arvados-server check`)", func() error {
+ ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(&bytes.Buffer{}, "text", "info"))
+ ldr.SetupFlags(flag.NewFlagSet("diagnostics", flag.ContinueOnError))
+ cfg, err := ldr.Load()
+ if err != nil {
+ diag.infof("skipping because config could not be loaded: %s", err)
+ return nil
+ }
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ return err
+ }
+ if cluster.SystemRootToken != os.Getenv("ARVADOS_API_TOKEN") {
+ return fmt.Errorf("diagnostics usage error: %s is readable but SystemRootToken does not match $ARVADOS_API_TOKEN (to fix, either run 'arvados-client sudo diagnostics' to load everything from config file, or set ARVADOS_CONFIG=- to load nothing from config file)", ldr.Path)
+ }
+ agg := &health.Aggregator{Cluster: cluster}
+ resp := agg.ClusterHealth()
+ for _, e := range resp.Errors {
+ diag.errorf("health check: %s", e)
+ }
+ if len(resp.Errors) > 0 {
+ diag.infof("consider running `arvados-server check -yaml` for a comprehensive report")
+ }
+ diag.verbosef("reported clock skew = %v", resp.ClockSkew)
+ reported := map[string]bool{}
+ for _, result := range resp.Checks {
+ version := strings.SplitN(result.Metrics.Version, " (go", 2)[0]
+ if version != "" && !reported[version] {
+ diag.verbosef("arvados version = %s", version)
+ reported[version] = true
+ }
+ }
+ reported = map[string]bool{}
+ for _, result := range resp.Checks {
+ if result.Server != "" && !reported[result.Server] {
+ diag.verbosef("http frontend version = %s", result.Server)
+ reported[result.Server] = true
+ }
+ }
+ reported = map[string]bool{}
+ for _, result := range resp.Checks {
+ if sha := result.ConfigSourceSHA256; sha != "" && !reported[sha] {
+ diag.verbosef("config file sha256 = %s", sha)
+ reported[sha] = true
+ }
+ }
+ return nil
+ })
+
var dd arvados.DiscoveryDocument
ddpath := "discovery/v1/apis/arvados/v1/rest"
diag.dotest(10, fmt.Sprintf("getting discovery document from https://%s/%s", client.APIHost, ddpath), func() error {
@@ -134,7 +212,7 @@ func (diag *diagnoser) runtests() {
if err != nil {
return err
}
- diag.debugf("BlobSignatureTTL = %d", dd.BlobSignatureTTL)
+ diag.verbosef("BlobSignatureTTL = %d", dd.BlobSignatureTTL)
return nil
})
@@ -148,7 +226,7 @@ func (diag *diagnoser) runtests() {
if err != nil {
return err
}
- diag.debugf("Collections.BlobSigning = %v", cluster.Collections.BlobSigning)
+ diag.verbosef("Collections.BlobSigning = %v", cluster.Collections.BlobSigning)
cfgOK = true
return nil
})
@@ -161,7 +239,7 @@ func (diag *diagnoser) runtests() {
if err != nil {
return err
}
- diag.debugf("user uuid = %s", user.UUID)
+ diag.verbosef("user uuid = %s", user.UUID)
return nil
})
@@ -250,9 +328,9 @@ func (diag *diagnoser) runtests() {
isInternal := found["proxy"] == 0 && len(keeplist.Items) > 0
isExternal := found["proxy"] > 0 && found["proxy"] == len(keeplist.Items)
if isExternal {
- diag.debugf("controller returned only proxy services, this host is treated as \"external\"")
+ diag.infof("controller returned only proxy services, this host is treated as \"external\"")
} else if isInternal {
- diag.debugf("controller returned only non-proxy services, this host is treated as \"internal\"")
+ diag.infof("controller returned only non-proxy services, this host is treated as \"internal\"")
}
if (diag.checkInternal && !isInternal) || (diag.checkExternal && !isExternal) {
return fmt.Errorf("expecting internal=%v external=%v, but found internal=%v external=%v", diag.checkInternal, diag.checkExternal, isInternal, isExternal)
@@ -329,7 +407,7 @@ func (diag *diagnoser) runtests() {
}
if len(grplist.Items) > 0 {
project = grplist.Items[0]
- diag.debugf("using existing project, uuid = %s", project.UUID)
+ diag.verbosef("using existing project, uuid = %s", project.UUID)
return nil
}
diag.debugf("list groups: ok, no results")
@@ -340,7 +418,7 @@ func (diag *diagnoser) runtests() {
if err != nil {
return fmt.Errorf("create project: %s", err)
}
- diag.debugf("created project, uuid = %s", project.UUID)
+ diag.verbosef("created project, uuid = %s", project.UUID)
return nil
})
@@ -360,7 +438,7 @@ func (diag *diagnoser) runtests() {
if err != nil {
return err
}
- diag.debugf("ok, uuid = %s", collection.UUID)
+ diag.verbosef("ok, uuid = %s", collection.UUID)
return nil
})
@@ -374,37 +452,100 @@ func (diag *diagnoser) runtests() {
}()
}
- // Read hello-world.tar to find image ID, so we can upload it
- // as "sha256:{...}.tar"
+ tempdir, err := ioutil.TempDir("", "arvados-diagnostics")
+ if err != nil {
+ diag.errorf("error creating temp dir: %s", err)
+ return
+ }
+ defer os.RemoveAll(tempdir)
+
var imageSHA2 string
- {
- tr := tar.NewReader(bytes.NewReader(HelloWorldDockerImage))
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- break
- }
+ var dockerImageData []byte
+ if diag.dockerImage != "" || diag.priority < 1 {
+ // We won't be using the self-built docker image, so
+ // don't build it. But we will write the embedded
+ // "hello-world" image to our test collection to test
+ // upload/download, whether or not we're using it as a
+ // docker image.
+ dockerImageData = HelloWorldDockerImage
+
+ if diag.priority > 0 {
+ imageSHA2, err = getSHA2FromImageData(dockerImageData)
if err != nil {
- diag.errorf("internal error/bug: cannot read embedded docker image tar file: %s", err)
+ diag.errorf("internal error/bug: %s", err)
return
}
- if s := strings.TrimSuffix(hdr.Name, ".json"); len(s) == 64 && s != hdr.Name {
- imageSHA2 = s
- }
}
- if imageSHA2 == "" {
- diag.errorf("internal error/bug: cannot find {sha256}.json file in embedded docker image tar file")
+ } else if selfbin, err := os.Readlink("/proc/self/exe"); err != nil {
+ diag.errorf("readlink /proc/self/exe: %s", err)
+ return
+ } else if selfbindata, err := os.ReadFile(selfbin); err != nil {
+ diag.errorf("error reading %s: %s", selfbin, err)
+ return
+ } else {
+ selfbinSha := fmt.Sprintf("%x", sha256.Sum256(selfbindata))
+ tag := "arvados-client-diagnostics:" + selfbinSha[:9]
+ err := os.WriteFile(tempdir+"/arvados-client", selfbindata, 0777)
+ if err != nil {
+ diag.errorf("error writing %s: %s", tempdir+"/arvados-client", err)
+ return
+ }
+
+ dockerfile := "FROM " + diag.dockerImageFrom + "\n"
+ dockerfile += "RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends libfuse2 ca-certificates && apt-get clean\n"
+ dockerfile += "COPY /arvados-client /arvados-client\n"
+ cmd := exec.Command("docker", "build", "--tag", tag, "-f", "-", tempdir)
+ cmd.Stdin = strings.NewReader(dockerfile)
+ cmd.Stdout = diag.stderr
+ cmd.Stderr = diag.stderr
+ err = cmd.Run()
+ if err != nil {
+ diag.errorf("error building docker image: %s", err)
+ return
+ }
+ checkversion, err := exec.Command("docker", "run", tag, "/arvados-client", "version").CombinedOutput()
+ if err != nil {
+ diag.errorf("docker image does not seem to work: %s", err)
+ return
+ }
+ diag.infof("arvados-client version: %s", checkversion)
+
+ buf, err := exec.Command("docker", "inspect", "--format={{.Id}}", tag).Output()
+ if err != nil {
+ diag.errorf("docker inspect --format={{.Id}} %s: %s", tag, err)
+ return
+ }
+ imageSHA2 = min64HexDigits.FindString(string(buf))
+ if len(imageSHA2) != 64 {
+ diag.errorf("docker inspect --format={{.Id}} output %q does not seem to contain sha256 digest", buf)
+ return
+ }
+
+ buf, err = exec.Command("docker", "save", tag).Output()
+ if err != nil {
+ diag.errorf("docker save %s: %s", tag, err)
return
}
+ diag.infof("docker image size is %d", len(buf))
+ dockerImageData = buf
}
+ tarfilename := "sha256:" + imageSHA2 + ".tar"
+
diag.dotest(100, "uploading file via webdav", func() error {
- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
+ timeout := diag.timeout
+ if len(dockerImageData) > 10<<20 && timeout < time.Minute {
+ // Extend the normal http timeout if we're
+ // uploading a substantial docker image.
+ timeout = time.Minute
+ }
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(timeout))
defer cancel()
if collection.UUID == "" {
return fmt.Errorf("skipping, no test collection")
}
- req, err := http.NewRequestWithContext(ctx, "PUT", cluster.Services.WebDAVDownload.ExternalURL.String()+"c="+collection.UUID+"/sha256:"+imageSHA2+".tar", bytes.NewReader(HelloWorldDockerImage))
+ t0 := time.Now()
+ req, err := http.NewRequestWithContext(ctx, "PUT", cluster.Services.WebDAVDownload.ExternalURL.String()+"c="+collection.UUID+"/"+tarfilename, bytes.NewReader(dockerImageData))
if err != nil {
return fmt.Errorf("BUG? http.NewRequest: %s", err)
}
@@ -417,12 +558,12 @@ func (diag *diagnoser) runtests() {
if resp.StatusCode != http.StatusCreated {
return fmt.Errorf("status %s", resp.Status)
}
- diag.debugf("ok, status %s", resp.Status)
+ diag.verbosef("upload ok, status %s, %f MB/s", resp.Status, float64(len(dockerImageData))/time.Since(t0).Seconds()/1000000)
err = client.RequestAndDecodeContext(ctx, &collection, "GET", "arvados/v1/collections/"+collection.UUID, nil, nil)
if err != nil {
return fmt.Errorf("get updated collection: %s", err)
}
- diag.debugf("ok, pdh %s", collection.PortableDataHash)
+ diag.verbosef("upload pdh %s", collection.PortableDataHash)
return nil
})
@@ -445,11 +586,11 @@ func (diag *diagnoser) runtests() {
fileurl string
}{
{false, false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "foo"},
- {false, false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + "testfile"},
+ {false, false, http.StatusNotFound, strings.Replace(davurl.String(), "*", "d41d8cd98f00b204e9800998ecf8427e-0", 1) + tarfilename},
{false, false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo"},
- {false, false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/testfile"},
- {true, true, http.StatusOK, strings.Replace(davurl.String(), "*", strings.Replace(collection.PortableDataHash, "+", "-", -1), 1) + "testfile"},
- {true, false, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=" + collection.UUID + "/_/sha256:" + imageSHA2 + ".tar"},
+ {false, false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=d41d8cd98f00b204e9800998ecf8427e+0/_/" + tarfilename},
+ {true, true, http.StatusOK, strings.Replace(davurl.String(), "*", strings.Replace(collection.PortableDataHash, "+", "-", -1), 1) + tarfilename},
+ {true, false, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + "c=" + collection.UUID + "/_/" + tarfilename},
} {
diag.dotest(120+i, fmt.Sprintf("downloading from webdav (%s)", trial.fileurl), func() error {
if trial.needWildcard && !davWildcard {
@@ -478,7 +619,7 @@ func (diag *diagnoser) runtests() {
if resp.StatusCode != trial.status {
return fmt.Errorf("unexpected response status: %s", resp.Status)
}
- if trial.status == http.StatusOK && !bytes.Equal(body, HelloWorldDockerImage) {
+ if trial.status == http.StatusOK && !bytes.Equal(body, dockerImageData) {
excerpt := body
if len(excerpt) > 128 {
excerpt = append([]byte(nil), body[:128]...)
@@ -507,35 +648,6 @@ func (diag *diagnoser) runtests() {
return nil
})
- diag.dotest(140, "getting workbench1 webshell page", func() error {
- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
- defer cancel()
- if vm.UUID == "" {
- diag.warnf("skipping, no vm available")
- return nil
- }
- webshelltermurl := cluster.Services.Workbench1.ExternalURL.String() + "virtual_machines/" + vm.UUID + "/webshell/testusername"
- diag.debugf("url %s", webshelltermurl)
- req, err := http.NewRequestWithContext(ctx, "GET", webshelltermurl, nil)
- if err != nil {
- return err
- }
- req.Header.Set("Authorization", "Bearer "+client.AuthToken)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return fmt.Errorf("reading response: %s", err)
- }
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("unexpected response status: %s %q", resp.Status, body)
- }
- return nil
- })
-
diag.dotest(150, "connecting to webshell service", func() error {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
defer cancel()
@@ -591,13 +703,26 @@ func (diag *diagnoser) runtests() {
}
timestamp := time.Now().Format(time.RFC3339)
- ctrCommand := []string{"echo", timestamp}
- if diag.dockerImage == "" {
+
+ var ctrCommand []string
+ switch diag.dockerImage {
+ case "":
+ if collection.UUID == "" {
+ return fmt.Errorf("skipping, no test collection to use as docker image")
+ }
+ diag.dockerImage = collection.PortableDataHash
+ ctrCommand = []string{"/arvados-client", "diagnostics",
+ "-priority=0", // don't run a container
+ "-log-level=" + diag.logLevel,
+ "-internal-client=true"}
+ case "hello-world":
if collection.UUID == "" {
return fmt.Errorf("skipping, no test collection to use as docker image")
}
diag.dockerImage = collection.PortableDataHash
ctrCommand = []string{"/hello"}
+ default:
+ ctrCommand = []string{"echo", timestamp}
}
var cr arvados.ContainerRequest
@@ -621,25 +746,25 @@ func (diag *diagnoser) runtests() {
},
},
"runtime_constraints": arvados.RuntimeConstraints{
+ API: true,
VCPUs: 1,
- RAM: 1 << 26,
- KeepCacheRAM: 1 << 26,
+ RAM: 128 << 20,
+ KeepCacheRAM: 64 << 20,
},
}})
if err != nil {
return err
}
- diag.debugf("container request uuid = %s", cr.UUID)
- diag.debugf("container uuid = %s", cr.ContainerUUID)
+ diag.infof("container request uuid = %s", cr.UUID)
+ diag.verbosef("container uuid = %s", cr.ContainerUUID)
timeout := 10 * time.Minute
diag.infof("container request submitted, waiting up to %v for container to run", arvados.Duration(timeout))
- ctx, cancel = context.WithDeadline(context.Background(), time.Now().Add(timeout))
- defer cancel()
+ deadline := time.Now().Add(timeout)
var c arvados.Container
- for ; cr.State != arvados.ContainerRequestStateFinal; time.Sleep(2 * time.Second) {
- ctx, cancel := context.WithDeadline(ctx, time.Now().Add(diag.timeout))
+ for ; cr.State != arvados.ContainerRequestStateFinal && time.Now().Before(deadline); time.Sleep(2 * time.Second) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))
defer cancel()
crStateWas := cr.State
@@ -659,13 +784,61 @@ func (diag *diagnoser) runtests() {
if c.State != cStateWas {
diag.debugf("container state = %s", c.State)
}
+
+ cancel()
}
+ if cr.State != arvados.ContainerRequestStateFinal {
+ err := client.RequestAndDecodeContext(context.Background(), &cr, "PATCH", "arvados/v1/container_requests/"+cr.UUID, nil, map[string]interface{}{
+ "container_request": map[string]interface{}{
+ "priority": 0,
+ }})
+ if err != nil {
+ diag.infof("error canceling container request %s: %s", cr.UUID, err)
+ } else {
+ diag.debugf("canceled container request %s", cr.UUID)
+ }
+ return fmt.Errorf("timed out waiting for container to finish; container request %s state was %q, container %s state was %q", cr.UUID, cr.State, c.UUID, c.State)
+ }
if c.State != arvados.ContainerStateComplete {
return fmt.Errorf("container request %s is final but container %s did not complete: container state = %q", cr.UUID, cr.ContainerUUID, c.State)
- } else if c.ExitCode != 0 {
+ }
+ if c.ExitCode != 0 {
return fmt.Errorf("container exited %d", c.ExitCode)
}
return nil
})
}
+
+func getSHA2FromImageData(dockerImageData []byte) (string, error) {
+ tr := tar.NewReader(bytes.NewReader(dockerImageData))
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ return "", fmt.Errorf("cannot find manifest.json in docker image tar file")
+ }
+ if err != nil {
+ return "", fmt.Errorf("cannot read docker image tar file: %s", err)
+ }
+ if hdr.Name != "manifest.json" {
+ continue
+ }
+ var manifest []struct {
+ Config string
+ }
+ err = json.NewDecoder(tr).Decode(&manifest)
+ if err != nil {
+ return "", fmt.Errorf("cannot read manifest.json from docker image tar file: %s", err)
+ }
+ if len(manifest) == 0 {
+ return "", fmt.Errorf("manifest.json is empty")
+ }
+ s := min64HexDigits.FindString(manifest[0].Config)
+ if len(s) != 64 {
+ return "", fmt.Errorf("found manifest.json but .[0].Config %q does not seem to contain sha256 digest", manifest[0].Config)
+ }
+ return s, nil
+ }
+}
+
+var min64HexDigits = regexp.MustCompile(`[0-9a-f]{64,}`)
diff --git a/lib/diagnostics/docker_image_test.go b/lib/diagnostics/docker_image_test.go
new file mode 100644
index 0000000000..ace4a2c035
--- /dev/null
+++ b/lib/diagnostics/docker_image_test.go
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package diagnostics
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+var _ = Suite(&suite{})
+
+type suite struct{}
+
+func (*suite) TestGetSHA2FromImageData(c *C) {
+ imageSHA2, err := getSHA2FromImageData(HelloWorldDockerImage)
+ c.Check(err, IsNil)
+ c.Check(imageSHA2, Matches, `[0-9a-f]{64}`)
+}
diff --git a/lib/dispatchcloud/cmd.go b/lib/dispatchcloud/cmd.go
index 0254c6526c..81982cdc1a 100644
--- a/lib/dispatchcloud/cmd.go
+++ b/lib/dispatchcloud/cmd.go
@@ -21,6 +21,10 @@ func newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg
if err != nil {
return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
}
+ // Disable auto-retry. We have transient failure recovery at
+ // the application level, so we would rather receive/report
+ // upstream errors right away.
+ ac.Timeout = 0
d := &dispatcher{
Cluster: cluster,
Context: ctx,
diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
index 938ef915f2..8d8b7ff9af 100644
--- a/lib/dispatchcloud/container/queue.go
+++ b/lib/dispatchcloud/container/queue.go
@@ -15,7 +15,14 @@ import (
"github.com/sirupsen/logrus"
)
-type typeChooser func(*arvados.Container) (arvados.InstanceType, error)
+// Stop fetching queued containers after this many of the highest
+// priority non-supervisor containers. Reduces API load when queue is
+// long. This also limits how quickly a large batch of queued
+// containers can be started, which improves reliability under high
+// load at the cost of increased latency under light load.
+const queuedContainersTarget = 100
+
+type typeChooser func(*arvados.Container) ([]arvados.InstanceType, error)
// An APIClient performs Arvados API requests. It is typically an
// *arvados.Client.
@@ -27,11 +34,11 @@ type APIClient interface {
// record and the instance type that should be used to run it.
type QueueEnt struct {
// The container to run. Only the UUID, State, Priority,
- // RuntimeConstraints, Mounts, and ContainerImage fields are
- // populated.
- Container arvados.Container `json:"container"`
- InstanceType arvados.InstanceType `json:"instance_type"`
- FirstSeenAt time.Time `json:"first_seen_at"`
+ // RuntimeConstraints, ContainerImage, SchedulingParameters,
+ // and CreatedAt fields are populated.
+ Container arvados.Container `json:"container"`
+ InstanceTypes []arvados.InstanceType `json:"instance_types"`
+ FirstSeenAt time.Time `json:"first_seen_at"`
}
// String implements fmt.Stringer by returning the queued container's
@@ -232,13 +239,30 @@ func (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {
// Caller must have lock.
func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
- it, err := cq.chooseType(&ctr)
+ logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
+ // We didn't ask for the Mounts field when polling
+ // controller/RailsAPI, because it can be expensive on the
+ // Rails side, and most of the time we already have it. But
+ // this is the first time we're seeing this container, so we
+ // need to fetch mounts in order to choose an instance type.
+ err := cq.client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+ctr.UUID, nil, arvados.GetOptions{
+ Select: []string{"mounts"},
+ })
+ if err != nil {
+ logger.WithError(err).Warn("error getting mounts")
+ return
+ }
+ types, err := cq.chooseType(&ctr)
+
+ // Avoid wasting memory on a large Mounts attr (we don't need
+ // it after choosing type).
+ ctr.Mounts = nil
+
if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
// We assume here that any chooseType error is a hard
// error: it wouldn't help to try again, or to leave
// it for a different dispatcher process to attempt.
errorString := err.Error()
- logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
logger.WithError(err).Warn("cancel container with no suitable instance type")
go func() {
if ctr.State == arvados.ContainerStateQueued {
@@ -280,13 +304,20 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
}()
return
}
+ typeNames := ""
+ for _, it := range types {
+ if typeNames != "" {
+ typeNames += ", "
+ }
+ typeNames += it.Name
+ }
cq.logger.WithFields(logrus.Fields{
"ContainerUUID": ctr.UUID,
"State": ctr.State,
"Priority": ctr.Priority,
- "InstanceType": it.Name,
+ "InstanceTypes": typeNames,
}).Info("adding container to queue")
- cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
+ cq.current[uuid] = QueueEnt{Container: ctr, InstanceTypes: types, FirstSeenAt: time.Now()}
}
// Lock acquires the dispatch lock for the given container.
@@ -384,7 +415,7 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
*next[upd.UUID] = upd
}
}
- selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts", "scheduling_parameters", "created_at"}
+ selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "scheduling_parameters", "created_at"}
limitParam := 1000
mine, err := cq.fetchAll(arvados.ResourceListParams{
@@ -393,7 +424,7 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
Limit: &limitParam,
Count: "none",
Filters: []arvados.Filter{{"locked_by_uuid", "=", auth.UUID}},
- })
+ }, 0)
if err != nil {
return nil, err
}
@@ -401,16 +432,23 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
avail, err := cq.fetchAll(arvados.ResourceListParams{
Select: selectParam,
- Order: "uuid",
+ Order: "priority desc",
Limit: &limitParam,
Count: "none",
Filters: []arvados.Filter{{"state", "=", arvados.ContainerStateQueued}, {"priority", ">", "0"}},
- })
+ }, queuedContainersTarget)
if err != nil {
return nil, err
}
apply(avail)
+ // Check for containers that we already know about but weren't
+ // returned by any of the above queries, and fetch them
+ // explicitly by UUID. If they're in a final state we can drop
+ // them, but otherwise we need to apply updates, e.g.,
+ //
+ // - Queued container priority has been reduced
+ // - Locked container has been requeued with lower priority
missing := map[string]bool{}
cq.mtx.Lock()
for uuid, ent := range cq.current {
@@ -436,7 +474,7 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
Order: "uuid",
Count: "none",
Filters: filters,
- })
+ }, 0)
if err != nil {
return nil, err
}
@@ -471,10 +509,18 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
return next, nil
}
-func (cq *Queue) fetchAll(initialParams arvados.ResourceListParams) ([]arvados.Container, error) {
+// Fetch all pages of containers.
+//
+// Except: if maxNonSuper>0, stop fetching more pages after receiving
+// that many non-supervisor containers. Along with {Order: "priority
+// desc"}, this enables fetching enough high priority scheduling-ready
+// containers to make progress, without necessarily fetching the
+// entire queue.
+func (cq *Queue) fetchAll(initialParams arvados.ResourceListParams, maxNonSuper int) ([]arvados.Container, error) {
var results []arvados.Container
params := initialParams
params.Offset = 0
+ nonSuper := 0
for {
// This list variable must be a new one declared
// inside the loop: otherwise, items in the API
@@ -490,8 +536,23 @@ func (cq *Queue) fetchAll(initialParams arvados.ResourceListParams) ([]arvados.C
break
}
+ // Conserve memory by deleting mounts that aren't
+ // relevant to choosing the instance type.
+ for _, c := range list.Items {
+ for path, mnt := range c.Mounts {
+ if mnt.Kind != "tmp" {
+ delete(c.Mounts, path)
+ }
+ }
+ if !c.SchedulingParameters.Supervisor {
+ nonSuper++
+ }
+ }
+
results = append(results, list.Items...)
- if len(params.Order) == 1 && params.Order == "uuid" {
+ if maxNonSuper > 0 && nonSuper >= maxNonSuper {
+ break
+ } else if params.Order == "uuid" {
params.Filters = append(initialParams.Filters, arvados.Filter{"uuid", ">", list.Items[len(list.Items)-1].UUID})
} else {
params.Offset += len(list.Items)
@@ -523,7 +584,7 @@ func (cq *Queue) runMetrics(reg *prometheus.Registry) {
}
ents, _ := cq.Entries()
for _, ent := range ents {
- count[entKey{ent.Container.State, ent.InstanceType.Name}]++
+ count[entKey{ent.Container.State, ent.InstanceTypes[0].Name}]++
}
for k, v := range count {
mEntries.WithLabelValues(string(k.state), k.inst).Set(float64(v))
diff --git a/lib/dispatchcloud/container/queue_test.go b/lib/dispatchcloud/container/queue_test.go
index 0075ee324e..928c6dd8c8 100644
--- a/lib/dispatchcloud/container/queue_test.go
+++ b/lib/dispatchcloud/container/queue_test.go
@@ -40,8 +40,9 @@ func (suite *IntegrationSuite) TearDownTest(c *check.C) {
}
func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
- typeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
- return arvados.InstanceType{Name: "testType"}, nil
+ typeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
+ c.Check(ctr.Mounts["/tmp"].Capacity, check.Equals, int64(24000000000))
+ return []arvados.InstanceType{{Name: "testType"}}, nil
}
client := arvados.NewClientFromEnv()
@@ -61,9 +62,12 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
var wg sync.WaitGroup
for uuid, ent := range ents {
c.Check(ent.Container.UUID, check.Equals, uuid)
- c.Check(ent.InstanceType.Name, check.Equals, "testType")
+ c.Check(ent.InstanceTypes, check.HasLen, 1)
+ c.Check(ent.InstanceTypes[0].Name, check.Equals, "testType")
c.Check(ent.Container.State, check.Equals, arvados.ContainerStateQueued)
c.Check(ent.Container.Priority > 0, check.Equals, true)
+ // Mounts should be deleted to avoid wasting memory
+ c.Check(ent.Container.Mounts, check.IsNil)
ctr, ok := cq.Get(uuid)
c.Check(ok, check.Equals, true)
@@ -105,7 +109,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
}
func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
- errorTypeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
+ errorTypeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
// Make sure the relevant container fields are
// actually populated.
c.Check(ctr.ContainerImage, check.Equals, "test")
@@ -113,7 +117,7 @@ func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
c.Check(ctr.RuntimeConstraints.RAM, check.Equals, int64(12000000000))
c.Check(ctr.Mounts["/tmp"].Capacity, check.Equals, int64(24000000000))
c.Check(ctr.Mounts["/var/spool/cwl"].Capacity, check.Equals, int64(24000000000))
- return arvados.InstanceType{}, errors.New("no suitable instance type")
+ return nil, errors.New("no suitable instance type")
}
client := arvados.NewClientFromEnv()
diff --git a/lib/dispatchcloud/dispatcher.go b/lib/dispatchcloud/dispatcher.go
index ae91a710e3..04283df48f 100644
--- a/lib/dispatchcloud/dispatcher.go
+++ b/lib/dispatchcloud/dispatcher.go
@@ -15,6 +15,9 @@ import (
"time"
"git.arvados.org/arvados.git/lib/cloud"
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/lib/dispatchcloud/container"
"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
@@ -53,18 +56,27 @@ type dispatcher struct {
Registry *prometheus.Registry
InstanceSetID cloud.InstanceSetID
+ dbConnector ctrlctx.DBConnector
logger logrus.FieldLogger
instanceSet cloud.InstanceSet
pool pool
queue scheduler.ContainerQueue
+ sched *scheduler.Scheduler
httpHandler http.Handler
sshKey ssh.Signer
setupOnce sync.Once
stop chan struct{}
stopped chan struct{}
+
+ schedQueueMtx sync.Mutex
+ schedQueueRefreshed time.Time
+ schedQueue []scheduler.QueueEnt
+ schedQueueMap map[string]scheduler.QueueEnt
}
+var schedQueueRefresh = time.Second
+
// Start starts the dispatcher. Start can be called multiple times
// with no ill effect.
func (disp *dispatcher) Start() {
@@ -107,7 +119,7 @@ func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
return exr
}
-func (disp *dispatcher) typeChooser(ctr *arvados.Container) (arvados.InstanceType, error) {
+func (disp *dispatcher) typeChooser(ctr *arvados.Container) ([]arvados.InstanceType, error) {
return ChooseInstanceType(disp.Cluster, ctr)
}
@@ -118,6 +130,7 @@ func (disp *dispatcher) setup() {
func (disp *dispatcher) initialize() {
disp.logger = ctxlog.FromContext(disp.Context)
+ disp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.Cluster.PostgreSQL}
disp.ArvClient.AuthToken = disp.AuthToken
@@ -133,19 +146,39 @@ func (disp *dispatcher) initialize() {
disp.stop = make(chan struct{}, 1)
disp.stopped = make(chan struct{})
- if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Containers.DispatchPrivateKey)); err != nil {
+ if key, err := config.LoadSSHKey(disp.Cluster.Containers.DispatchPrivateKey); err != nil {
disp.logger.Fatalf("error parsing configured Containers.DispatchPrivateKey: %s", err)
} else {
disp.sshKey = key
}
+ installPublicKey := disp.sshKey.PublicKey()
+ if !disp.Cluster.Containers.CloudVMs.DeployPublicKey {
+ installPublicKey = nil
+ }
instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger, disp.Registry)
if err != nil {
disp.logger.Fatalf("error initializing driver: %s", err)
}
+ dblock.Dispatch.Lock(disp.Context, disp.dbConnector.GetDB)
disp.instanceSet = instanceSet
- disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.Registry, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
- disp.queue = container.NewQueue(disp.logger, disp.Registry, disp.typeChooser, disp.ArvClient)
+ disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.Registry, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, installPublicKey, disp.Cluster)
+ if disp.queue == nil {
+ disp.queue = container.NewQueue(disp.logger, disp.Registry, disp.typeChooser, disp.ArvClient)
+ }
+
+ staleLockTimeout := time.Duration(disp.Cluster.Containers.StaleLockTimeout)
+ if staleLockTimeout == 0 {
+ staleLockTimeout = defaultStaleLockTimeout
+ }
+ pollInterval := time.Duration(disp.Cluster.Containers.CloudVMs.PollInterval)
+ if pollInterval <= 0 {
+ pollInterval = defaultPollInterval
+ }
+ disp.sched = scheduler.New(disp.Context, disp.ArvClient, disp.queue, disp.pool, disp.Registry, staleLockTimeout, pollInterval,
+ disp.Cluster.Containers.CloudVMs.InitialQuotaEstimate,
+ disp.Cluster.Containers.CloudVMs.MaxInstances,
+ disp.Cluster.Containers.CloudVMs.SupervisorFraction)
if disp.Cluster.ManagementToken == "" {
disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -154,6 +187,7 @@ func (disp *dispatcher) initialize() {
} else {
mux := httprouter.New()
mux.HandlerFunc("GET", "/arvados/v1/dispatch/containers", disp.apiContainers)
+ mux.HandlerFunc("GET", "/arvados/v1/dispatch/container", disp.apiContainer)
mux.HandlerFunc("POST", "/arvados/v1/dispatch/containers/kill", disp.apiContainerKill)
mux.HandlerFunc("GET", "/arvados/v1/dispatch/instances", disp.apiInstances)
mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/hold", disp.apiInstanceHold)
@@ -175,37 +209,58 @@ func (disp *dispatcher) initialize() {
}
func (disp *dispatcher) run() {
+ defer dblock.Dispatch.Unlock()
defer close(disp.stopped)
defer disp.instanceSet.Stop()
defer disp.pool.Stop()
- staleLockTimeout := time.Duration(disp.Cluster.Containers.StaleLockTimeout)
- if staleLockTimeout == 0 {
- staleLockTimeout = defaultStaleLockTimeout
- }
- pollInterval := time.Duration(disp.Cluster.Containers.CloudVMs.PollInterval)
- if pollInterval <= 0 {
- pollInterval = defaultPollInterval
- }
- sched := scheduler.New(disp.Context, disp.queue, disp.pool, disp.Registry, staleLockTimeout, pollInterval)
- sched.Start()
- defer sched.Stop()
+ disp.sched.Start()
+ defer disp.sched.Stop()
<-disp.stop
}
-// Management API: all active and queued containers.
+// Get a snapshot of the scheduler's queue, no older than
+// schedQueueRefresh.
+//
+// First return value is in the sorted order used by the scheduler.
+// Second return value is a map of the same entries, for efficiently
+// looking up a single container.
+func (disp *dispatcher) schedQueueCurrent() ([]scheduler.QueueEnt, map[string]scheduler.QueueEnt) {
+ disp.schedQueueMtx.Lock()
+ defer disp.schedQueueMtx.Unlock()
+ if time.Since(disp.schedQueueRefreshed) > schedQueueRefresh {
+ disp.schedQueue = disp.sched.Queue()
+ disp.schedQueueMap = make(map[string]scheduler.QueueEnt)
+ for _, ent := range disp.schedQueue {
+ disp.schedQueueMap[ent.Container.UUID] = ent
+ }
+ disp.schedQueueRefreshed = time.Now()
+ }
+ return disp.schedQueue, disp.schedQueueMap
+}
+
+// Management API: scheduling queue entries for all active and queued
+// containers.
func (disp *dispatcher) apiContainers(w http.ResponseWriter, r *http.Request) {
var resp struct {
- Items []container.QueueEnt `json:"items"`
- }
- qEntries, _ := disp.queue.Entries()
- for _, ent := range qEntries {
- resp.Items = append(resp.Items, ent)
+ Items []scheduler.QueueEnt `json:"items"`
}
+ resp.Items, _ = disp.schedQueueCurrent()
json.NewEncoder(w).Encode(resp)
}
+// Management API: scheduling queue entry for a specified container.
+func (disp *dispatcher) apiContainer(w http.ResponseWriter, r *http.Request) {
+ _, sq := disp.schedQueueCurrent()
+ ent, ok := sq[r.FormValue("container_uuid")]
+ if !ok {
+ httpserver.Error(w, "container not found", http.StatusNotFound)
+ return
+ }
+ json.NewEncoder(w).Encode(ent)
+}
+
// Management API: all active instances (cloud VMs).
func (disp *dispatcher) apiInstances(w http.ResponseWriter, r *http.Request) {
var resp struct {
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
index 829a053636..d651e73a67 100644
--- a/lib/dispatchcloud/dispatcher_test.go
+++ b/lib/dispatchcloud/dispatcher_test.go
@@ -6,15 +6,21 @@ package dispatchcloud
import (
"context"
+ "crypto/tls"
"encoding/json"
+ "fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
+ "net/url"
"os"
+ "strings"
"sync"
+ "sync/atomic"
"time"
+ "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
@@ -27,11 +33,12 @@ import (
var _ = check.Suite(&DispatcherSuite{})
type DispatcherSuite struct {
- ctx context.Context
- cancel context.CancelFunc
- cluster *arvados.Cluster
- stubDriver *test.StubDriver
- disp *dispatcher
+ ctx context.Context
+ cancel context.CancelFunc
+ cluster *arvados.Cluster
+ stubDriver *test.StubDriver
+ disp *dispatcher
+ error503Server *httptest.Server
}
func (s *DispatcherSuite) SetUpTest(c *check.C) {
@@ -45,18 +52,30 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
s.stubDriver = &test.StubDriver{
HostKey: hostpriv,
AuthorizedKeys: []ssh.PublicKey{dispatchpub},
+ ErrorRateCreate: 0.1,
ErrorRateDestroy: 0.1,
MinTimeBetweenCreateCalls: time.Millisecond,
+ QuotaMaxInstances: 10,
}
+ // We need the postgresql connection info from the integration
+ // test config.
+ cfg, err := config.NewLoader(nil, ctxlog.FromContext(s.ctx)).Load()
+ c.Assert(err, check.IsNil)
+ testcluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+
s.cluster = &arvados.Cluster{
ManagementToken: "test-management-token",
+ PostgreSQL: testcluster.PostgreSQL,
Containers: arvados.ContainersConfig{
CrunchRunCommand: "crunch-run",
CrunchRunArgumentsList: []string{"--foo", "--extra='args'"},
DispatchPrivateKey: string(dispatchprivraw),
StaleLockTimeout: arvados.Duration(5 * time.Millisecond),
RuntimeEngine: "stub",
+ MaxDispatchAttempts: 10,
+ MaximumPriceFactor: 1.5,
CloudVMs: arvados.CloudVMsConfig{
Driver: "test",
SyncInterval: arvados.Duration(10 * time.Millisecond),
@@ -65,6 +84,7 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
TimeoutProbe: arvados.Duration(15 * time.Millisecond),
TimeoutShutdown: arvados.Duration(5 * time.Millisecond),
MaxCloudOpsPerSecond: 500,
+ InitialQuotaEstimate: 8,
PollInterval: arvados.Duration(5 * time.Millisecond),
ProbeInterval: arvados.Duration(5 * time.Millisecond),
MaxProbesPerSecond: 1000,
@@ -89,7 +109,19 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
arvadostest.SetServiceURL(&s.cluster.Services.Controller, "https://"+os.Getenv("ARVADOS_API_HOST")+"/")
arvClient, err := arvados.NewClientFromConfig(s.cluster)
- c.Check(err, check.IsNil)
+ c.Assert(err, check.IsNil)
+ // Disable auto-retry
+ arvClient.Timeout = 0
+
+ s.error503Server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c.Logf("503 stub: returning 503")
+ w.WriteHeader(http.StatusServiceUnavailable)
+ }))
+ arvClient.Client = &http.Client{
+ Transport: &http.Transport{
+ Proxy: s.arvClientProxy(c),
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true}}}
s.disp = &dispatcher{
Cluster: s.cluster,
@@ -97,6 +129,10 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
ArvClient: arvClient,
AuthToken: arvadostest.AdminToken,
Registry: prometheus.NewRegistry(),
+ // Providing a stub queue here prevents
+ // disp.initialize() from making a real one that uses
+ // the integration test servers/database.
+ queue: &test.Queue{},
}
// Test cases can modify s.cluster before calling
// initialize(), and then modify private state before calling
@@ -106,6 +142,21 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
func (s *DispatcherSuite) TearDownTest(c *check.C) {
s.cancel()
s.disp.Close()
+ s.error503Server.Close()
+}
+
+// Intercept outgoing API requests for "/503" and respond HTTP
+// 503. This lets us force (*arvados.Client)Last503() to return
+// something.
+func (s *DispatcherSuite) arvClientProxy(c *check.C) func(*http.Request) (*url.URL, error) {
+ return func(req *http.Request) (*url.URL, error) {
+ if req.URL.Path == "/503" {
+ c.Logf("arvClientProxy: proxying to 503 stub")
+ return url.Parse(s.error503Server.URL)
+ } else {
+ return nil, nil
+ }
+ }
}
// DispatchToStubDriver checks that the dispatcher wires everything
@@ -114,9 +165,9 @@ func (s *DispatcherSuite) TearDownTest(c *check.C) {
// artificial errors in order to exercise a variety of code paths.
func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
Drivers["test"] = s.stubDriver
- s.disp.setupOnce.Do(s.disp.initialize)
queue := &test.Queue{
- ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
+ MaxDispatchAttempts: 5,
+ ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
return ChooseInstanceType(s.cluster, ctr)
},
Logger: ctxlog.TestLogger(c),
@@ -133,6 +184,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
})
}
s.disp.queue = queue
+ s.disp.setupOnce.Do(s.disp.initialize)
var mtx sync.Mutex
done := make(chan struct{})
@@ -148,6 +200,11 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
return
}
delete(waiting, ctr.UUID)
+ if len(waiting) == 100 {
+ // trigger scheduler maxConcurrency limit
+ c.Logf("test: requesting 503 in order to trigger maxConcurrency limit")
+ s.disp.ArvClient.RequestAndDecode(nil, "GET", "503", nil, nil)
+ }
if len(waiting) == 0 {
close(done)
}
@@ -156,26 +213,51 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
finishContainer(ctr)
return int(rand.Uint32() & 0x3)
}
- n := 0
+ var type4BrokenUntil time.Time
+ var countCapacityErrors int64
+ vmCount := int32(0)
s.stubDriver.Queue = queue
- s.stubDriver.SetupVM = func(stubvm *test.StubVM) {
- n++
+ s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
+ if pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {
+ c.Logf("test: returning capacity error for instance type %s", pt)
+ atomic.AddInt64(&countCapacityErrors, 1)
+ return test.CapacityError{InstanceTypeSpecific: true}
+ }
+ n := atomic.AddInt32(&vmCount, 1)
+ c.Logf("SetupVM: instance %s n=%d", stubvm.Instance(), n)
stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
stubvm.ExecuteContainer = executeContainer
stubvm.CrashRunningContainer = finishContainer
stubvm.ExtraCrunchRunArgs = "'--runtime-engine=stub' '--foo' '--extra='\\''args'\\'''"
- switch n % 7 {
- case 0:
+ switch {
+ case stubvm.Instance().ProviderType() == test.InstanceType(4).ProviderType &&
+ (type4BrokenUntil.IsZero() || time.Now().Before(type4BrokenUntil)):
+ // Initially (at least 2*TimeoutBooting), all
+ // instances of this type are completely
+ // broken. This ensures the
+ // boot_outcomes{outcome="failure"} metric is
+ // not zero.
+ stubvm.Broken = time.Now()
+ if type4BrokenUntil.IsZero() {
+ type4BrokenUntil = time.Now().Add(2 * s.cluster.Containers.CloudVMs.TimeoutBooting.Duration())
+ }
+ case n%7 == 0:
+ // some instances start out OK but then stop
+ // running any commands
stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
- case 1:
+ case n%7 == 1:
+ // some instances never pass a run-probe
stubvm.CrunchRunMissing = true
- case 2:
+ case n%7 == 2:
+ // some instances start out OK but then start
+ // reporting themselves as broken
stubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)
default:
stubvm.CrunchRunCrashRate = 0.1
stubvm.ArvMountDeadlockRate = 0.1
}
+ return nil
}
s.stubDriver.Bugf = c.Errorf
@@ -184,12 +266,18 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
err := s.disp.CheckHealth()
c.Check(err, check.IsNil)
- select {
- case <-done:
- c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))
- case <-time.After(10 * time.Second):
- c.Fatalf("timed out; still waiting for %d containers: %q", len(waiting), waiting)
+ for len(waiting) > 0 {
+ waswaiting := len(waiting)
+ select {
+ case <-done:
+ // loop will end because len(waiting)==0
+ case <-time.After(5 * time.Second):
+ if len(waiting) >= waswaiting {
+ c.Fatalf("timed out; no progress in 5 s while waiting for %d containers: %q", len(waiting), waiting)
+ }
+ }
}
+ c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))
deadline := time.Now().Add(5 * time.Second)
for range time.NewTicker(10 * time.Millisecond).C {
@@ -205,6 +293,8 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
}
}
+ c.Check(countCapacityErrors, check.Not(check.Equals), int64(0))
+
req := httptest.NewRequest("GET", "/metrics", nil)
req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
resp := httptest.NewRecorder()
@@ -215,7 +305,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="0",operation="Destroy"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="Create"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error="1",operation="List"} 0\n.*`)
- c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} 0.*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="aborted"} [0-9]+\n.*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="disappeared"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="failure"} [^0].*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome="success"} [^0].*`)
@@ -235,13 +325,14 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*last_503_time [1-9][0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*max_concurrent_containers [1-9][0-9e+.]*`)
}
-func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
+func (s *DispatcherSuite) TestManagementAPI_Permissions(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
- s.disp.queue = &test.Queue{}
go s.disp.run()
for _, token := range []string{"abc", ""} {
@@ -259,11 +350,10 @@ func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
}
}
-func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
+func (s *DispatcherSuite) TestManagementAPI_Disabled(c *check.C) {
s.cluster.ManagementToken = ""
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
- s.disp.queue = &test.Queue{}
go s.disp.run()
for _, token := range []string{"abc", ""} {
@@ -277,13 +367,121 @@ func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
}
}
-func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
+func (s *DispatcherSuite) TestManagementAPI_Containers(c *check.C) {
+ s.cluster.ManagementToken = "abcdefgh"
+ s.cluster.Containers.CloudVMs.InitialQuotaEstimate = 4
+ Drivers["test"] = s.stubDriver
+ queue := &test.Queue{
+ MaxDispatchAttempts: 5,
+ ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
+ return ChooseInstanceType(s.cluster, ctr)
+ },
+ Logger: ctxlog.TestLogger(c),
+ }
+ s.stubDriver.Queue = queue
+ s.stubDriver.QuotaMaxInstances = 4
+ s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
+ if stubvm.Instance().ProviderType() >= test.InstanceType(4).ProviderType {
+ return test.CapacityError{InstanceTypeSpecific: true}
+ }
+ stubvm.ExecuteContainer = func(ctr arvados.Container) int {
+ time.Sleep(5 * time.Second)
+ return 0
+ }
+ return nil
+ }
+ s.disp.queue = queue
+ s.disp.setupOnce.Do(s.disp.initialize)
+
+ go s.disp.run()
+
+ type queueEnt struct {
+ Container arvados.Container
+ InstanceType arvados.InstanceType `json:"instance_type"`
+ SchedulingStatus string `json:"scheduling_status"`
+ }
+ type containersResponse struct {
+ Items []queueEnt
+ }
+ getContainers := func() containersResponse {
+ schedQueueRefresh = time.Millisecond
+ req := httptest.NewRequest("GET", "/arvados/v1/dispatch/containers", nil)
+ req.Header.Set("Authorization", "Bearer abcdefgh")
+ resp := httptest.NewRecorder()
+ s.disp.ServeHTTP(resp, req)
+ var cresp containersResponse
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ err := json.Unmarshal(resp.Body.Bytes(), &cresp)
+ c.Check(err, check.IsNil)
+ return cresp
+ }
+
+ c.Check(getContainers().Items, check.HasLen, 0)
+
+ for i := 0; i < 20; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ State: arvados.ContainerStateQueued,
+ Priority: int64(100 - i),
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ RAM: int64(i%3+1) << 30,
+ VCPUs: i%8 + 1,
+ },
+ })
+ }
+ queue.Update()
+
+ expect := `
+ 0 zzzzz-dz642-000000000000000 (Running) ""
+ 1 zzzzz-dz642-000000000000001 (Running) ""
+ 2 zzzzz-dz642-000000000000002 (Locked) "waiting for suitable instance type to become available: queue position 1"
+ 3 zzzzz-dz642-000000000000003 (Locked) "waiting for suitable instance type to become available: queue position 2"
+ 4 zzzzz-dz642-000000000000004 (Queued) "waiting while cluster is running at capacity: queue position 3"
+ 5 zzzzz-dz642-000000000000005 (Queued) "waiting while cluster is running at capacity: queue position 4"
+ 6 zzzzz-dz642-000000000000006 (Queued) "waiting while cluster is running at capacity: queue position 5"
+ 7 zzzzz-dz642-000000000000007 (Queued) "waiting while cluster is running at capacity: queue position 6"
+ 8 zzzzz-dz642-000000000000008 (Queued) "waiting while cluster is running at capacity: queue position 7"
+ 9 zzzzz-dz642-000000000000009 (Queued) "waiting while cluster is running at capacity: queue position 8"
+ 10 zzzzz-dz642-000000000000010 (Queued) "waiting while cluster is running at capacity: queue position 9"
+ 11 zzzzz-dz642-000000000000011 (Queued) "waiting while cluster is running at capacity: queue position 10"
+ 12 zzzzz-dz642-000000000000012 (Queued) "waiting while cluster is running at capacity: queue position 11"
+ 13 zzzzz-dz642-000000000000013 (Queued) "waiting while cluster is running at capacity: queue position 12"
+ 14 zzzzz-dz642-000000000000014 (Queued) "waiting while cluster is running at capacity: queue position 13"
+ 15 zzzzz-dz642-000000000000015 (Queued) "waiting while cluster is running at capacity: queue position 14"
+ 16 zzzzz-dz642-000000000000016 (Queued) "waiting while cluster is running at capacity: queue position 15"
+ 17 zzzzz-dz642-000000000000017 (Queued) "waiting while cluster is running at capacity: queue position 16"
+ 18 zzzzz-dz642-000000000000018 (Queued) "waiting while cluster is running at capacity: queue position 17"
+ 19 zzzzz-dz642-000000000000019 (Queued) "waiting while cluster is running at capacity: queue position 18"
+`
+ sequence := make(map[string][]string)
+ var summary string
+ for deadline := time.Now().Add(time.Second); time.Now().Before(deadline); time.Sleep(time.Millisecond) {
+ cresp := getContainers()
+ summary = "\n"
+ for i, ent := range cresp.Items {
+ summary += fmt.Sprintf("% 2d %s (%s) %q\n", i, ent.Container.UUID, ent.Container.State, ent.SchedulingStatus)
+ s := sequence[ent.Container.UUID]
+ if len(s) == 0 || s[len(s)-1] != ent.SchedulingStatus {
+ sequence[ent.Container.UUID] = append(s, ent.SchedulingStatus)
+ }
+ }
+ if summary == expect {
+ break
+ }
+ }
+ c.Check(summary, check.Equals, expect)
+ for i := 0; i < 5; i++ {
+ c.Logf("sequence for container %d:\n... %s", i, strings.Join(sequence[test.ContainerUUID(i)], "\n... "))
+ }
+}
+
+func (s *DispatcherSuite) TestManagementAPI_Instances(c *check.C) {
s.cluster.ManagementToken = "abcdefgh"
s.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
Drivers["test"] = s.stubDriver
s.disp.setupOnce.Do(s.disp.initialize)
- s.disp.queue = &test.Queue{}
go s.disp.run()
+ defer s.disp.Close()
type instance struct {
Instance string
@@ -311,6 +509,7 @@ func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
sr := getInstances()
c.Check(len(sr.Items), check.Equals, 0)
+ s.stubDriver.ErrorRateCreate = 0
ch := s.disp.pool.Subscribe()
defer s.disp.pool.Unsubscribe(ch)
ok := s.disp.pool.Create(test.InstanceType(1))
diff --git a/lib/dispatchcloud/driver.go b/lib/dispatchcloud/driver.go
index 93515defb7..44adc23fd3 100644
--- a/lib/dispatchcloud/driver.go
+++ b/lib/dispatchcloud/driver.go
@@ -33,7 +33,7 @@ func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger
return nil, fmt.Errorf("unsupported cloud driver %q", cluster.Containers.CloudVMs.Driver)
}
sharedResourceTags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)
- is, err := driver.InstanceSet(cluster.Containers.CloudVMs.DriverParameters, setID, sharedResourceTags, logger)
+ is, err := driver.InstanceSet(cluster.Containers.CloudVMs.DriverParameters, setID, sharedResourceTags, logger, reg)
is = newInstrumentedInstanceSet(is, reg)
if maxops := cluster.Containers.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {
is = rateLimitedInstanceSet{
diff --git a/lib/dispatchcloud/node_size.go b/lib/dispatchcloud/node_size.go
index 7c7643bfc7..802bc65c28 100644
--- a/lib/dispatchcloud/node_size.go
+++ b/lib/dispatchcloud/node_size.go
@@ -6,6 +6,7 @@ package dispatchcloud
import (
"errors"
+ "math"
"regexp"
"sort"
"strconv"
@@ -56,7 +57,7 @@ func estimateDockerImageSize(collectionPDH string) int64 {
// EstimateScratchSpace estimates how much available disk space (in
// bytes) is needed to run the container by summing the capacity
// requested by 'tmp' mounts plus disk space required to load the
-// Docker image.
+// Docker image plus arv-mount block cache.
func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
for _, m := range ctr.Mounts {
if m.Kind == "tmp" {
@@ -80,6 +81,9 @@ func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
// Now reserve space for the extracted image on disk.
needScratch += dockerImageSize
+	// Now reserve space for the arv-mount disk cache
+ needScratch += ctr.RuntimeConstraints.KeepCacheDisk
+
return
}
@@ -96,12 +100,16 @@ func versionLess(vs1 string, vs2 string) (bool, error) {
return v1 < v2, nil
}
-// ChooseInstanceType returns the cheapest available
-// arvados.InstanceType big enough to run ctr.
-func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
+// ChooseInstanceType returns the arvados.InstanceTypes eligible to
+// run ctr, i.e., those that have enough RAM, VCPUs, etc., and are not
+// too expensive according to cluster configuration.
+//
+// The returned types are sorted with lower prices first.
+//
+// The error is non-nil if and only if the returned slice is empty.
+func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) ([]arvados.InstanceType, error) {
if len(cc.InstanceTypes) == 0 {
- err = ErrInstanceTypesNotConfigured
- return
+ return nil, ErrInstanceTypesNotConfigured
}
needScratch := EstimateScratchSpace(ctr)
@@ -110,34 +118,41 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
needRAM += int64(cc.Containers.ReserveExtraRAM)
- needRAM += int64(cc.Containers.LocalKeepBlobBuffersPerVCPU * needVCPUs * (1 << 26))
+ if cc.Containers.LocalKeepBlobBuffersPerVCPU > 0 {
+ // + 200 MiB for keepstore process + 10% for GOGC=10
+ needRAM += 220 << 20
+ // + 64 MiB for each blob buffer + 10% for GOGC=10
+ needRAM += int64(cc.Containers.LocalKeepBlobBuffersPerVCPU * needVCPUs * (1 << 26) * 11 / 10)
+ }
needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
- ok := false
+ maxPriceFactor := math.Max(cc.Containers.MaximumPriceFactor, 1)
+ var types []arvados.InstanceType
+ var maxPrice float64
for _, it := range cc.InstanceTypes {
driverInsuff, driverErr := versionLess(it.CUDA.DriverVersion, ctr.RuntimeConstraints.CUDA.DriverVersion)
capabilityInsuff, capabilityErr := versionLess(it.CUDA.HardwareCapability, ctr.RuntimeConstraints.CUDA.HardwareCapability)
switch {
// reasons to reject a node
- case ok && it.Price > best.Price: // already selected a node, and this one is more expensive
+ case maxPrice > 0 && it.Price > maxPrice: // too expensive
case int64(it.Scratch) < needScratch: // insufficient scratch
case int64(it.RAM) < needRAM: // insufficient RAM
case it.VCPUs < needVCPUs: // insufficient VCPUs
case it.Preemptible != ctr.SchedulingParameters.Preemptible: // wrong preemptable setting
- case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs): // same price, worse specs
case it.CUDA.DeviceCount < ctr.RuntimeConstraints.CUDA.DeviceCount: // insufficient CUDA devices
case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (driverInsuff || driverErr != nil): // insufficient driver version
case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (capabilityInsuff || capabilityErr != nil): // insufficient hardware capability
// Don't select this node
default:
// Didn't reject the node, so select it
- // Lower price || (same price && better specs)
- best = it
- ok = true
+ types = append(types, it)
+ if newmax := it.Price * maxPriceFactor; newmax < maxPrice || maxPrice == 0 {
+ maxPrice = newmax
+ }
}
}
- if !ok {
+ if len(types) == 0 {
availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
for _, t := range cc.InstanceTypes {
availableTypes = append(availableTypes, t)
@@ -145,11 +160,39 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
sort.Slice(availableTypes, func(a, b int) bool {
return availableTypes[a].Price < availableTypes[b].Price
})
- err = ConstraintsNotSatisfiableError{
+ return nil, ConstraintsNotSatisfiableError{
errors.New("constraints not satisfiable by any configured instance type"),
availableTypes,
}
- return
}
- return
+ sort.Slice(types, func(i, j int) bool {
+ if types[i].Price != types[j].Price {
+ // prefer lower price
+ return types[i].Price < types[j].Price
+ }
+ if types[i].RAM != types[j].RAM {
+ // if same price, prefer more RAM
+ return types[i].RAM > types[j].RAM
+ }
+ if types[i].VCPUs != types[j].VCPUs {
+ // if same price and RAM, prefer more VCPUs
+ return types[i].VCPUs > types[j].VCPUs
+ }
+ if types[i].Scratch != types[j].Scratch {
+ // if same price and RAM and VCPUs, prefer more scratch
+ return types[i].Scratch > types[j].Scratch
+ }
+ // no preference, just sort the same way each time
+ return types[i].Name < types[j].Name
+ })
+ // Truncate types at maxPrice. We rejected it.Price>maxPrice
+ // in the loop above, but at that point maxPrice wasn't
+ // necessarily the final (lowest) maxPrice.
+ for i, it := range types {
+ if i > 0 && it.Price > maxPrice {
+ types = types[:i]
+ break
+ }
+ }
+ return types, nil
}
diff --git a/lib/dispatchcloud/node_size_test.go b/lib/dispatchcloud/node_size_test.go
index eb3648e8ac..5d2713e982 100644
--- a/lib/dispatchcloud/node_size_test.go
+++ b/lib/dispatchcloud/node_size_test.go
@@ -80,7 +80,10 @@ func (*NodeSizeSuite) TestChoose(c *check.C) {
"costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
},
} {
- best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{ReserveExtraRAM: 268435456}}, &arvados.Container{
+ best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{
+ LocalKeepBlobBuffersPerVCPU: 1,
+ ReserveExtraRAM: 268435456,
+ }}, &arvados.Container{
Mounts: map[string]arvados.Mount{
"/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
},
@@ -90,15 +93,78 @@ func (*NodeSizeSuite) TestChoose(c *check.C) {
KeepCacheRAM: 123456789,
},
})
- c.Check(err, check.IsNil)
- c.Check(best.Name, check.Equals, "best")
- c.Check(best.RAM >= 1234567890, check.Equals, true)
- c.Check(best.VCPUs >= 2, check.Equals, true)
- c.Check(best.Scratch >= 2*GiB, check.Equals, true)
+ c.Assert(err, check.IsNil)
+ c.Assert(best, check.Not(check.HasLen), 0)
+ c.Check(best[0].Name, check.Equals, "best")
+ c.Check(best[0].RAM >= 1234567890, check.Equals, true)
+ c.Check(best[0].VCPUs >= 2, check.Equals, true)
+ c.Check(best[0].Scratch >= 2*GiB, check.Equals, true)
+ for i := range best {
+ // If multiple instance types are returned
+ // then they should all have the same price,
+ // because we didn't set MaximumPriceFactor>1.
+ c.Check(best[i].Price, check.Equals, best[0].Price)
+ }
+ }
+}
+
+func (*NodeSizeSuite) TestMaximumPriceFactor(c *check.C) {
+ menu := map[string]arvados.InstanceType{
+ "best+7": {Price: 3.4, RAM: 8000000000, VCPUs: 8, Scratch: 64 * GiB, Name: "best+7"},
+ "best+5": {Price: 3.0, RAM: 8000000000, VCPUs: 8, Scratch: 16 * GiB, Name: "best+5"},
+ "best+3": {Price: 2.6, RAM: 4000000000, VCPUs: 8, Scratch: 16 * GiB, Name: "best+3"},
+ "best+2": {Price: 2.4, RAM: 4000000000, VCPUs: 8, Scratch: 4 * GiB, Name: "best+2"},
+ "best+1": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 4 * GiB, Name: "best+1"},
+ "best": {Price: 2.0, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+ "small+1": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 16 * GiB, Name: "small+1"},
+ "small": {Price: 1.0, RAM: 2000000000, VCPUs: 2, Scratch: 1 * GiB, Name: "small"},
+ }
+ best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{
+ MaximumPriceFactor: 1.5,
+ }}, &arvados.Container{
+ Mounts: map[string]arvados.Mount{
+ "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+ },
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 987654321,
+ KeepCacheRAM: 123456789,
+ },
+ })
+ c.Assert(err, check.IsNil)
+ c.Assert(best, check.HasLen, 5)
+ c.Check(best[0].Name, check.Equals, "best") // best price is $2
+ c.Check(best[1].Name, check.Equals, "best+1")
+ c.Check(best[2].Name, check.Equals, "best+2")
+ c.Check(best[3].Name, check.Equals, "best+3")
+ c.Check(best[4].Name, check.Equals, "best+5") // max price is $2 * 1.5 = $3
+}
+
+func (*NodeSizeSuite) TestChooseWithBlobBuffersOverhead(c *check.C) {
+ menu := map[string]arvados.InstanceType{
+ "nearly": {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "small"},
+ "best": {Price: 3.3, RAM: 8000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+ "costly": {Price: 4.4, RAM: 12000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
}
+ best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{
+ LocalKeepBlobBuffersPerVCPU: 16, // 1 GiB per vcpu => 2 GiB
+ ReserveExtraRAM: 268435456,
+ }}, &arvados.Container{
+ Mounts: map[string]arvados.Mount{
+ "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+ },
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 987654321,
+ KeepCacheRAM: 123456789,
+ },
+ })
+ c.Check(err, check.IsNil)
+ c.Assert(best, check.HasLen, 1)
+ c.Check(best[0].Name, check.Equals, "best")
}
-func (*NodeSizeSuite) TestChoosePreemptable(c *check.C) {
+func (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {
menu := map[string]arvados.InstanceType{
"costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: "costly"},
"almost best": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "almost best"},
@@ -119,11 +185,12 @@ func (*NodeSizeSuite) TestChoosePreemptable(c *check.C) {
},
})
c.Check(err, check.IsNil)
- c.Check(best.Name, check.Equals, "best")
- c.Check(best.RAM >= 1234567890, check.Equals, true)
- c.Check(best.VCPUs >= 2, check.Equals, true)
- c.Check(best.Scratch >= 2*GiB, check.Equals, true)
- c.Check(best.Preemptible, check.Equals, true)
+ c.Assert(best, check.HasLen, 1)
+ c.Check(best[0].Name, check.Equals, "best")
+ c.Check(best[0].RAM >= 1234567890, check.Equals, true)
+ c.Check(best[0].VCPUs >= 2, check.Equals, true)
+ c.Check(best[0].Scratch >= 2*GiB, check.Equals, true)
+ c.Check(best[0].Preemptible, check.Equals, true)
}
func (*NodeSizeSuite) TestScratchForDockerImage(c *check.C) {
@@ -226,9 +293,10 @@ func (*NodeSizeSuite) TestChooseGPU(c *check.C) {
CUDA: tc.CUDA,
},
})
- if best.Name != "" {
+ if len(best) > 0 {
c.Check(err, check.IsNil)
- c.Check(best.Name, check.Equals, tc.SelectedInstance)
+ c.Assert(best, check.HasLen, 1)
+ c.Check(best[0].Name, check.Equals, tc.SelectedInstance)
} else {
c.Check(err, check.Not(check.IsNil))
}
diff --git a/lib/dispatchcloud/scheduler/interfaces.go b/lib/dispatchcloud/scheduler/interfaces.go
index 78f8c804e2..6e56bd8c40 100644
--- a/lib/dispatchcloud/scheduler/interfaces.go
+++ b/lib/dispatchcloud/scheduler/interfaces.go
@@ -34,6 +34,7 @@ type WorkerPool interface {
Running() map[string]time.Time
Unallocated() map[arvados.InstanceType]int
CountWorkers() map[worker.State]int
+ AtCapacity(arvados.InstanceType) bool
AtQuota() bool
Create(arvados.InstanceType) bool
Shutdown(arvados.InstanceType) bool
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index f729f0dc23..d270972295 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -5,6 +5,7 @@
package scheduler
import (
+ "fmt"
"sort"
"time"
@@ -13,14 +14,71 @@ import (
"github.com/sirupsen/logrus"
)
+var quietAfter503 = time.Minute
+
+type QueueEnt struct {
+ container.QueueEnt
+
+ // Human-readable scheduling status as of the last scheduling
+ // iteration.
+ SchedulingStatus string `json:"scheduling_status"`
+}
+
+const (
+ schedStatusPreparingRuntimeEnvironment = "preparing runtime environment"
+ schedStatusPriorityZero = "not scheduling: priority 0" // ", state X" appended at runtime
+ schedStatusContainerLimitReached = "not starting: supervisor container limit has been reached"
+ schedStatusWaitingForPreviousAttempt = "waiting for previous attempt to exit"
+ schedStatusWaitingNewInstance = "waiting for new instance to be ready"
+ schedStatusWaitingInstanceType = "waiting for suitable instance type to become available" // ": queue position X" appended at runtime
+ schedStatusWaitingCloudResources = "waiting for cloud resources"
+ schedStatusWaitingClusterCapacity = "waiting while cluster is running at capacity" // ": queue position X" appended at runtime
+)
+
+// Queue returns the sorted queue from the last scheduling iteration.
+func (sch *Scheduler) Queue() []QueueEnt {
+ ents, _ := sch.lastQueue.Load().([]QueueEnt)
+ return ents
+}
+
func (sch *Scheduler) runQueue() {
+ running := sch.pool.Running()
+ unalloc := sch.pool.Unallocated()
+
+ totalInstances := 0
+ for _, n := range sch.pool.CountWorkers() {
+ totalInstances += n
+ }
+
unsorted, _ := sch.queue.Entries()
- sorted := make([]container.QueueEnt, 0, len(unsorted))
+ sorted := make([]QueueEnt, 0, len(unsorted))
for _, ent := range unsorted {
- sorted = append(sorted, ent)
+ sorted = append(sorted, QueueEnt{QueueEnt: ent})
}
sort.Slice(sorted, func(i, j int) bool {
- if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+ _, irunning := running[sorted[i].Container.UUID]
+ _, jrunning := running[sorted[j].Container.UUID]
+ if irunning != jrunning {
+ // Ensure the "tryrun" loop (see below) sees
+ // already-scheduled containers first, to
+ // ensure existing supervisor containers are
+ // properly counted before we decide whether
+ // we have room for new ones.
+ return irunning
+ }
+ ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
+ jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
+ if ilocked != jlocked {
+ // Give precedence to containers that we have
+ // already locked, even if higher-priority
+ // containers have since arrived in the
+ // queue. This avoids undesirable queue churn
+ // effects including extra lock/unlock cycles
+ // and bringing up new instances and quickly
+ // shutting them down to make room for
+ // different instance sizes.
+ return ilocked
+ } else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
return pi > pj
} else {
// When containers have identical priority,
@@ -32,45 +90,211 @@ func (sch *Scheduler) runQueue() {
}
})
- running := sch.pool.Running()
- unalloc := sch.pool.Unallocated()
+ if t := sch.client.Last503(); t.After(sch.last503time) {
+ // API has sent an HTTP 503 response since last time
+ // we checked. Use current #containers - 1 as
+ // maxConcurrency, i.e., try to stay just below the
+ // level where we see 503s.
+ sch.last503time = t
+ if newlimit := len(running) - 1; newlimit < 1 {
+ sch.maxConcurrency = 1
+ } else {
+ sch.maxConcurrency = newlimit
+ }
+ } else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
+ // If we haven't seen any 503 errors lately, raise
+ // limit to ~10% beyond the current workload.
+ //
+ // As we use the added 10% to schedule more
+ // containers, len(running) will increase and we'll
+ // push the limit up further. Soon enough,
+ // maxConcurrency will get high enough to schedule the
+ // entire queue, hit pool quota, or get 503s again.
+ max := len(running)*11/10 + 1
+ if sch.maxConcurrency < max {
+ sch.maxConcurrency = max
+ }
+ }
+ if sch.last503time.IsZero() {
+ sch.mLast503Time.Set(0)
+ } else {
+ sch.mLast503Time.Set(float64(sch.last503time.Unix()))
+ }
+ if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
+ sch.maxConcurrency = sch.maxInstances
+ }
+ if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {
+ // Evidently it is possible to run this many
+ // instances, so raise our estimate.
+ sch.instancesWithinQuota = totalInstances
+ }
+ if sch.pool.AtQuota() {
+ // Consider current workload to be the maximum
+ // allowed, for the sake of reporting metrics and
+ // calculating max supervisors.
+ //
+ // Now that sch.maxConcurrency is set, we will only
+ // raise it past len(running) by 10%. This helps
+ // avoid running an inappropriate number of
+ // supervisors when we reach the cloud-imposed quota
+ // (which may be based on # CPUs etc) long before the
+ // configured MaxInstances.
+ if sch.maxConcurrency == 0 || sch.maxConcurrency > totalInstances {
+ if totalInstances == 0 {
+ sch.maxConcurrency = 1
+ } else {
+ sch.maxConcurrency = totalInstances
+ }
+ }
+ sch.instancesWithinQuota = totalInstances
+ } else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
+ // Once we've hit a quota error and started tracking
+ // instancesWithinQuota (i.e., it's not zero), we
+ // avoid exceeding that known-working level by more
+ // than 1.
+ //
+ // If we don't do this, we risk entering a pattern of
+ // repeatedly locking several containers, hitting
+ // quota again, and unlocking them again each time the
+ // driver stops reporting AtQuota, which tends to use
+ // up the max lock/unlock cycles on the next few
+ // containers in the queue, and cause them to fail.
+ sch.maxConcurrency = sch.instancesWithinQuota + 1
+ }
+ sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))
+
+ maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
+ if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
+ maxSupervisors = 1
+ }
sch.logger.WithFields(logrus.Fields{
- "Containers": len(sorted),
- "Processes": len(running),
+ "Containers": len(sorted),
+ "Processes": len(running),
+ "maxConcurrency": sch.maxConcurrency,
}).Debug("runQueue")
dontstart := map[arvados.InstanceType]bool{}
- var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
+ var atcapacity = map[string]bool{} // ProviderTypes reported as AtCapacity during this runQueue() invocation
+ var overquota []QueueEnt // entries that are unmappable because of worker pool quota
+ var overmaxsuper []QueueEnt // unmappable because max supervisors (these are not included in overquota)
var containerAllocatedWorkerBootingCount int
+ // trying is #containers running + #containers we're trying to
+ // start. We stop trying to start more containers if this
+ // reaches the dynamic maxConcurrency limit.
+ trying := len(running)
+
+ qpos := 0
+ supervisors := 0
+
tryrun:
- for i, ctr := range sorted {
- ctr, it := ctr.Container, ctr.InstanceType
+ for i, ent := range sorted {
+ ctr, types := ent.Container, ent.InstanceTypes
logger := sch.logger.WithFields(logrus.Fields{
"ContainerUUID": ctr.UUID,
- "InstanceType": it.Name,
})
- if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
+ if ctr.SchedulingParameters.Supervisor {
+ supervisors += 1
+ }
+ if _, running := running[ctr.UUID]; running {
+ if ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {
+ sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
+ }
+ continue
+ }
+ if ctr.Priority < 1 {
+ sorted[i].SchedulingStatus = schedStatusPriorityZero + ", state " + string(ctr.State)
continue
}
+ if ctr.SchedulingParameters.Supervisor && maxSupervisors > 0 && supervisors > maxSupervisors {
+ overmaxsuper = append(overmaxsuper, sorted[i])
+ sorted[i].SchedulingStatus = schedStatusContainerLimitReached
+ continue
+ }
+ // If we have unalloc instances of any of the eligible
+ // instance types, unallocOK is true and unallocType
+ // is the lowest-cost type.
+ var unallocOK bool
+ var unallocType arvados.InstanceType
+ for _, it := range types {
+ if unalloc[it] > 0 {
+ unallocOK = true
+ unallocType = it
+ break
+ }
+ }
+ // If the pool is not reporting AtCapacity for any of
+ // the eligible instance types, availableOK is true
+ // and availableType is the lowest-cost type.
+ var availableOK bool
+ var availableType arvados.InstanceType
+ for _, it := range types {
+ if atcapacity[it.ProviderType] {
+ continue
+ } else if sch.pool.AtCapacity(it) {
+ atcapacity[it.ProviderType] = true
+ continue
+ } else {
+ availableOK = true
+ availableType = it
+ break
+ }
+ }
switch ctr.State {
case arvados.ContainerStateQueued:
- if unalloc[it] < 1 && sch.pool.AtQuota() {
- logger.Debug("not locking: AtQuota and no unalloc workers")
+ if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
+ logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
+ continue
+ }
+ trying++
+ if !unallocOK && sch.pool.AtQuota() {
+ logger.Trace("not starting: AtQuota and no unalloc workers")
overquota = sorted[i:]
break tryrun
}
+ if !unallocOK && !availableOK {
+ logger.Trace("not locking: AtCapacity and no unalloc workers")
+ continue
+ }
if sch.pool.KillContainer(ctr.UUID, "about to lock") {
logger.Info("not locking: crunch-run process from previous attempt has not exited")
continue
}
go sch.lockContainer(logger, ctr.UUID)
- unalloc[it]--
+ unalloc[unallocType]--
case arvados.ContainerStateLocked:
- if unalloc[it] > 0 {
- unalloc[it]--
- } else if sch.pool.AtQuota() {
+ if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
+ logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
+ continue
+ }
+ trying++
+ if unallocOK {
+ // We have a suitable instance type,
+ // so mark it as allocated, and try to
+ // start the container.
+ unalloc[unallocType]--
+ logger = logger.WithField("InstanceType", unallocType.Name)
+ if dontstart[unallocType] {
+ // We already tried & failed to start
+ // a higher-priority container on the
+ // same instance type. Don't let this
+ // one sneak in ahead of it.
+ } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
+ sorted[i].SchedulingStatus = schedStatusWaitingForPreviousAttempt
+ logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
+ } else if sch.pool.StartContainer(unallocType, ctr) {
+ sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
+ logger.Trace("StartContainer => true")
+ } else {
+ sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
+ logger.Trace("StartContainer => false")
+ containerAllocatedWorkerBootingCount += 1
+ dontstart[unallocType] = true
+ }
+ continue
+ }
+ if sch.pool.AtQuota() {
// Don't let lower-priority containers
 				// starve this one by keeping
// idle workers alive on different
@@ -78,55 +302,104 @@ tryrun:
logger.Trace("overquota")
overquota = sorted[i:]
break tryrun
- } else if sch.pool.Create(it) {
- // Success. (Note pool.Create works
- // asynchronously and does its own
- // logging about the eventual outcome,
- // so we don't need to.)
- logger.Info("creating new instance")
- } else {
+ }
+ if !availableOK {
+ // Continue trying lower-priority
+ // containers in case they can run on
+ // different instance types that are
+ // available.
+ //
+ // The local "atcapacity" cache helps
+ // when the pool's flag resets after
+ // we look at container A but before
+ // we look at lower-priority container
+ // B. In that case we want to run
+ // container A on the next call to
+ // runQueue(), rather than run
+ // container B now.
+ qpos++
+ sorted[i].SchedulingStatus = schedStatusWaitingInstanceType + fmt.Sprintf(": queue position %d", qpos)
+ logger.Trace("all eligible types at capacity")
+ continue
+ }
+ logger = logger.WithField("InstanceType", availableType.Name)
+ if !sch.pool.Create(availableType) {
// Failed despite not being at quota,
- // e.g., cloud ops throttled. TODO:
- // avoid getting starved here if
- // instances of a specific type always
- // fail.
+ // e.g., cloud ops throttled.
logger.Trace("pool declined to create new instance")
continue
}
-
- if dontstart[it] {
- // We already tried & failed to start
- // a higher-priority container on the
- // same instance type. Don't let this
- // one sneak in ahead of it.
- } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
- logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
- } else if sch.pool.StartContainer(it, ctr) {
- // Success.
- } else {
- containerAllocatedWorkerBootingCount += 1
- dontstart[it] = true
- }
+ // Success. (Note pool.Create works
+ // asynchronously and does its own logging
+ // about the eventual outcome, so we don't
+ // need to.)
+ sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
+ logger.Info("creating new instance")
+ // Don't bother trying to start the container
+ // yet -- obviously the instance will take
+ // some time to boot and become ready.
+ containerAllocatedWorkerBootingCount += 1
+ dontstart[availableType] = true
}
}
sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
- sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))
+ sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))
- if len(overquota) > 0 {
+ var qreason string
+ if sch.pool.AtQuota() {
+ qreason = schedStatusWaitingCloudResources
+ } else {
+ qreason = schedStatusWaitingClusterCapacity
+ }
+ for i, ent := range sorted {
+ if ent.SchedulingStatus == "" && (ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {
+ qpos++
+ sorted[i].SchedulingStatus = fmt.Sprintf("%s: queue position %d", qreason, qpos)
+ }
+ }
+ sch.lastQueue.Store(sorted)
+
+ if len(overquota)+len(overmaxsuper) > 0 {
// Unlock any containers that are unmappable while
- // we're at quota.
- for _, ctr := range overquota {
+ // we're at quota (but if they have already been
+ // scheduled and they're loading docker images etc.,
+ // let them run).
+ var unlock []QueueEnt
+ unlock = append(unlock, overmaxsuper...)
+ if totalInstances > 0 && len(overquota) > 1 {
+ // We don't unlock the next-in-line container
+ // when at quota. This avoids a situation
+ // where our "at quota" state expires, we lock
+ // the next container and try to create an
+ // instance, the cloud provider still returns
+ // a quota error, we unlock the container, and
+ // we repeat this until the container reaches
+ // its limit of lock/unlock cycles.
+ unlock = append(unlock, overquota[1:]...)
+ } else {
+ // However, if totalInstances is 0 and we're
+ // still getting quota errors, then the
+ // next-in-line container is evidently not
+ // possible to run, so we should let it
+ // exhaust its lock/unlock cycles and
+ // eventually cancel, to avoid starvation.
+ unlock = append(unlock, overquota...)
+ }
+ for _, ctr := range unlock {
ctr := ctr.Container
- if ctr.State == arvados.ContainerStateLocked {
+ _, toolate := running[ctr.UUID]
+ if ctr.State == arvados.ContainerStateLocked && !toolate {
logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
- logger.Debug("unlock because pool capacity is used by higher priority containers")
+ logger.Info("unlock because pool capacity is used by higher priority containers")
err := sch.queue.Unlock(ctr.UUID)
if err != nil {
logger.WithError(err).Warn("error unlocking")
}
}
}
+ }
+ if len(overquota) > 0 {
// Shut down idle workers that didn't get any
// containers mapped onto them before we hit quota.
for it, n := range unalloc {
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index 5b5fa960a1..e4a05daba5 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -29,19 +29,15 @@ var (
}()
)
-type stubQuotaError struct {
- error
-}
-
-func (stubQuotaError) IsQuotaError() bool { return true }
-
type stubPool struct {
notify <-chan struct{}
unalloc map[arvados.InstanceType]int // idle+booting+unknown
+ busy map[arvados.InstanceType]int
idle map[arvados.InstanceType]int
unknown map[arvados.InstanceType]int
running map[string]time.Time
quota int
+ capacity map[string]int
canCreate int
creates []arvados.InstanceType
starts []string
@@ -52,7 +48,28 @@ type stubPool struct {
func (p *stubPool) AtQuota() bool {
p.Lock()
defer p.Unlock()
- return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+ n := len(p.running)
+ for _, nn := range p.unalloc {
+ n += nn
+ }
+ for _, nn := range p.unknown {
+ n += nn
+ }
+ return n >= p.quota
+}
+func (p *stubPool) AtCapacity(it arvados.InstanceType) bool {
+ supply, ok := p.capacity[it.ProviderType]
+ if !ok {
+ return false
+ }
+ for _, existing := range []map[arvados.InstanceType]int{p.unalloc, p.busy} {
+ for eit, n := range existing {
+ if eit.ProviderType == it.ProviderType {
+ supply -= n
+ }
+ }
+ }
+ return supply < 1
}
func (p *stubPool) Subscribe() <-chan struct{} { return p.notify }
func (p *stubPool) Unsubscribe(<-chan struct{}) {}
@@ -115,14 +132,15 @@ func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container
if p.idle[it] == 0 {
return false
}
+ p.busy[it]++
p.idle[it]--
p.unalloc[it]--
p.running[ctr.UUID] = time.Time{}
return true
}
-func chooseType(ctr *arvados.Container) (arvados.InstanceType, error) {
- return test.InstanceType(ctr.RuntimeConstraints.VCPUs), nil
+func chooseType(ctr *arvados.Container) ([]arvados.InstanceType, error) {
+ return []arvados.InstanceType{test.InstanceType(ctr.RuntimeConstraints.VCPUs)}, nil
}
var _ = check.Suite(&SchedulerSuite{})
@@ -185,10 +203,11 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
test.InstanceType(1): 1,
test.InstanceType(2): 2,
},
+ busy: map[arvados.InstanceType]int{},
running: map[string]time.Time{},
canCreate: 0,
}
- New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
c.Check(pool.running, check.HasLen, 1)
@@ -201,12 +220,8 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
// call Create().
func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
- for quota := 1; quota < 3; quota++ {
+ for quota := 1; quota <= 3; quota++ {
c.Logf("quota=%d", quota)
- shouldCreate := []arvados.InstanceType{}
- for i := 1; i < quota; i++ {
- shouldCreate = append(shouldCreate, test.InstanceType(3))
- }
queue := test.Queue{
ChooseType: chooseType,
Containers: []arvados.Container{
@@ -239,28 +254,338 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
idle: map[arvados.InstanceType]int{
test.InstanceType(2): 2,
},
+ busy: map[arvados.InstanceType]int{},
running: map[string]time.Time{},
creates: []arvados.InstanceType{},
starts: []string{},
canCreate: 0,
}
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
- sch.runQueue()
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
sch.sync()
sch.runQueue()
sch.sync()
- c.Check(pool.creates, check.DeepEquals, shouldCreate)
- if len(shouldCreate) == 0 {
- c.Check(pool.starts, check.DeepEquals, []string{})
- } else {
+ switch quota {
+ case 1, 2:
+ // Can't create a type3 node for ctr3, so we
+ // shutdown an unallocated node (type2), and
+ // unlock the 2nd-in-line container, but not
+ // the 1st-in-line container.
+ c.Check(pool.starts, check.HasLen, 0)
+ c.Check(pool.shutdowns, check.Equals, 1)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
+ })
+ case 3:
+ // Creating a type3 instance works, so we
+ // start ctr2 on a type2 instance, and leave
+ // ctr3 locked while we wait for the new
+ // instance to come up.
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+ default:
+ panic("test not written for quota>3")
}
- c.Check(pool.shutdowns, check.Equals, 3-quota)
- c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
- {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
- {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+ }
+}
+
+// If pool.AtCapacity(it) is true for one instance type, try running a
+// lower-priority container that uses a different node type. Don't
+// lock/unlock/start any container that requires the affected instance
+// type.
+func (*SchedulerSuite) TestInstanceCapacity(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ {
+ UUID: test.ContainerUUID(1),
+ Priority: 1,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(2),
+ Priority: 2,
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 4,
+ RAM: 4 << 30,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(3),
+ Priority: 3,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 4,
+ RAM: 4 << 30,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(4),
+ Priority: 4,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 4,
+ RAM: 4 << 30,
+ },
+ },
+ },
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 99,
+ capacity: map[string]int{test.InstanceType(4).ProviderType: 1},
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(4): 1,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(4): 1,
+ },
+ busy: map[arvados.InstanceType]int{},
+ running: map[string]time.Time{},
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 99,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
+ // Start container4, but then pool reports AtCapacity for
+ // type4, so we skip trying to create an instance for
+ // container3, skip locking container2, but do try to create a
+ // type1 instance for container1.
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
+// Don't unlock containers or shutdown unalloc (booting/idle) nodes
+// just because some 503 errors caused us to reduce maxConcurrency
+// below the current load level.
+//
+// We expect to raise maxConcurrency soon when we stop seeing 503s. If
+// that doesn't happen soon, the idle timeout will take care of the
+// excess nodes.
+func (*SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ // scheduled on an instance (but not Running yet)
+ {
+ UUID: test.ContainerUUID(1),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ },
+ // not yet scheduled
+ {
+ UUID: test.ContainerUUID(2),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ },
+ // scheduled on an instance (but not Running yet)
+ {
+ UUID: test.ContainerUUID(3),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ // not yet scheduled
+ {
+ UUID: test.ContainerUUID(4),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ // not yet locked
+ {
+ UUID: test.ContainerUUID(5),
+ Priority: 1000,
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ },
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ test.InstanceType(3): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ test.InstanceType(3): 1,
+ },
+ busy: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ test.InstanceType(3): 1,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(1): {},
+ test.ContainerUUID(3): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+ sch.last503time = time.Now()
+ sch.maxConcurrency = 3
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
+// If we somehow have more supervisor containers in Locked state than
+// we should (e.g., config changed since they started), and some
+// appropriate-sized instances booting up, unlock the excess
+// supervisor containers, but let the instances keep booting.
+func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ }
+ for i := 1; i <= 6; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: int64(1000 - i),
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ })
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
+ busy: map[arvados.InstanceType]int{
+ test.InstanceType(2): 4,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(1): {},
+ test.ContainerUUID(2): {},
+ test.ContainerUUID(3): {},
+ test.ContainerUUID(4): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
+ c.Check(pool.starts, check.DeepEquals, []string{})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: test.ContainerUUID(5), From: "Locked", To: "Queued"},
+ {UUID: test.ContainerUUID(6), From: "Locked", To: "Queued"},
+ })
+}
+
+// Assuming we're not at quota, don't try to shutdown idle nodes
+// merely because we have more queued/locked supervisor containers
+// than MaxSupervisors -- it won't help.
+func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ }
+ for i := 1; i <= 8; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: int64(1000 + i),
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
})
}
+ for i := 2; i < 4; i++ {
+ queue.Containers[i].State = arvados.ContainerStateLocked
+ }
+ for i := 4; i < 6; i++ {
+ queue.Containers[i].State = arvados.ContainerStateRunning
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
+ busy: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(5): {},
+ test.ContainerUUID(6): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
+ c.Check(pool.starts, check.HasLen, 2)
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.HasLen, 0)
}
// Don't flap lock/unlock when equal-priority containers compete for
@@ -293,24 +618,25 @@ func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
pool := stubPool{
quota: 2,
unalloc: map[arvados.InstanceType]int{
- test.InstanceType(3): 1,
+ test.InstanceType(3): 2,
},
idle: map[arvados.InstanceType]int{
- test.InstanceType(3): 1,
+ test.InstanceType(3): 2,
},
+ busy: map[arvados.InstanceType]int{},
running: map[string]time.Time{},
creates: []arvados.InstanceType{},
starts: []string{},
- canCreate: 1,
+ canCreate: 0,
}
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
for i := 0; i < 30; i++ {
sch.runQueue()
sch.sync()
time.Sleep(time.Millisecond)
}
c.Check(pool.shutdowns, check.Equals, 0)
- c.Check(pool.starts, check.HasLen, 1)
+ c.Check(pool.starts, check.HasLen, 2)
unlocked := map[string]int{}
for _, chg := range queue.StateChanges() {
if chg.To == arvados.ContainerStateQueued {
@@ -336,6 +662,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
test.InstanceType(1): 1,
test.InstanceType(2): 1,
},
+ busy: map[arvados.InstanceType]int{},
running: map[string]time.Time{},
canCreate: 4,
}
@@ -405,7 +732,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
},
}
queue.Update()
- New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
running := map[string]bool{}
@@ -429,6 +756,9 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
idle: map[arvados.InstanceType]int{
test.InstanceType(2): 0,
},
+ busy: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
running: map[string]time.Time{
test.ContainerUUID(2): {},
},
@@ -449,7 +779,7 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
},
}
queue.Update()
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
c.Check(pool.running, check.HasLen, 1)
sch.sync()
for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
@@ -482,7 +812,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
pool := stubPool{
unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
}
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
sch.runQueue()
sch.updateMetrics()
@@ -494,7 +824,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
// 'over quota' metric will be 1 because no workers are available and canCreate defaults
// to zero.
pool = stubPool{}
- sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
sch.runQueue()
sch.updateMetrics()
@@ -525,11 +855,85 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
pool = stubPool{
idle: map[arvados.InstanceType]int{test.InstanceType(1): 1},
unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
+ busy: map[arvados.InstanceType]int{},
running: map[string]time.Time{},
}
- sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
sch.runQueue()
sch.updateMetrics()
c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
}
+
+// Assign priority=4, 3 and 1 containers to idle nodes. Ignore the supervisor at priority 2.
+func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ {
+ UUID: test.ContainerUUID(1),
+ Priority: 1,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(2),
+ Priority: 2,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(3),
+ Priority: 3,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(4),
+ Priority: 4,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ },
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 1000,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(1): 4,
+ test.InstanceType(2): 4,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(1): 4,
+ test.InstanceType(2): 4,
+ },
+ busy: map[arvados.InstanceType]int{},
+ running: map[string]time.Time{},
+ canCreate: 0,
+ }
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 10, 0.2).runQueue()
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
+}
diff --git a/lib/dispatchcloud/scheduler/scheduler.go b/lib/dispatchcloud/scheduler/scheduler.go
index c3e67dd11f..bc6574a21a 100644
--- a/lib/dispatchcloud/scheduler/scheduler.go
+++ b/lib/dispatchcloud/scheduler/scheduler.go
@@ -9,6 +9,7 @@ package scheduler
import (
"context"
"sync"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -31,6 +32,7 @@ import (
// shuts down idle workers, in case they are consuming quota.
type Scheduler struct {
logger logrus.FieldLogger
+ client *arvados.Client
queue ContainerQueue
pool WorkerPool
reg *prometheus.Registry
@@ -45,18 +47,29 @@ type Scheduler struct {
stop chan struct{}
stopped chan struct{}
+ last503time time.Time // last time API responded 503
+ maxConcurrency int // dynamic container limit (0 = unlimited), see runQueue()
+	supervisorFraction   float64   // maximum fraction of "supervisor" containers (these are containers whose main job is to launch other containers, e.g. workflow runners)
+ maxInstances int // maximum number of instances the pool will bring up (0 = unlimited)
+ instancesWithinQuota int // max concurrency achieved since last quota error (0 = no quota error yet)
+
mContainersAllocatedNotStarted prometheus.Gauge
mContainersNotAllocatedOverQuota prometheus.Gauge
mLongestWaitTimeSinceQueue prometheus.Gauge
+ mLast503Time prometheus.Gauge
+ mMaxContainerConcurrency prometheus.Gauge
+
+ lastQueue atomic.Value // stores a []QueueEnt
}
// New returns a new unstarted Scheduler.
//
// Any given queue and pool should not be used by more than one
// scheduler at a time.
-func New(ctx context.Context, queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry, staleLockTimeout, queueUpdateInterval time.Duration) *Scheduler {
+func New(ctx context.Context, client *arvados.Client, queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry, staleLockTimeout, queueUpdateInterval time.Duration, minQuota, maxInstances int, supervisorFraction float64) *Scheduler {
sch := &Scheduler{
logger: ctxlog.FromContext(ctx),
+ client: client,
queue: queue,
pool: pool,
reg: reg,
@@ -66,6 +79,13 @@ func New(ctx context.Context, queue ContainerQueue, pool WorkerPool, reg *promet
stop: make(chan struct{}),
stopped: make(chan struct{}),
uuidOp: map[string]string{},
+ supervisorFraction: supervisorFraction,
+ maxInstances: maxInstances,
+ }
+ if minQuota > 0 {
+ sch.maxConcurrency = minQuota
+ } else {
+ sch.maxConcurrency = maxInstances
}
sch.registerMetrics(reg)
return sch
@@ -96,6 +116,32 @@ func (sch *Scheduler) registerMetrics(reg *prometheus.Registry) {
Help: "Current longest wait time of any container since queuing, and before the start of crunch-run.",
})
reg.MustRegister(sch.mLongestWaitTimeSinceQueue)
+ sch.mLast503Time = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "last_503_time",
+ Help: "Time of most recent 503 error received from API.",
+ })
+ reg.MustRegister(sch.mLast503Time)
+ sch.mMaxContainerConcurrency = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "max_concurrent_containers",
+		Help:      "Dynamically assigned limit on number of containers scheduled concurrently, set after receiving 503 errors from API.",
+ })
+ reg.MustRegister(sch.mMaxContainerConcurrency)
+ reg.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "at_quota",
+ Help: "Flag indicating the cloud driver is reporting an at-quota condition.",
+ }, func() float64 {
+ if sch.pool.AtQuota() {
+ return 1
+ } else {
+ return 0
+ }
+ }))
}
func (sch *Scheduler) updateMetrics() {
@@ -149,14 +195,23 @@ func (sch *Scheduler) run() {
}
// Keep the queue up to date.
- poll := time.NewTicker(sch.queueUpdateInterval)
- defer poll.Stop()
go func() {
- for range poll.C {
+ for {
+ starttime := time.Now()
err := sch.queue.Update()
if err != nil {
sch.logger.Errorf("error updating queue: %s", err)
}
+ // If the previous update took a long time,
+ // that probably means the server is
+ // overloaded, so wait that long before doing
+ // another. Otherwise, wait for the configured
+ // poll interval.
+ delay := time.Since(starttime)
+ if delay < sch.queueUpdateInterval {
+ delay = sch.queueUpdateInterval
+ }
+ time.Sleep(delay)
}
}()
diff --git a/lib/dispatchcloud/scheduler/sync_test.go b/lib/dispatchcloud/scheduler/sync_test.go
index a3ff0636e1..846bb4fc9e 100644
--- a/lib/dispatchcloud/scheduler/sync_test.go
+++ b/lib/dispatchcloud/scheduler/sync_test.go
@@ -48,7 +48,7 @@ func (*SchedulerSuite) TestForgetIrrelevantContainers(c *check.C) {
ents, _ := queue.Entries()
c.Check(ents, check.HasLen, 1)
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
sch.sync()
ents, _ = queue.Entries()
@@ -80,7 +80,7 @@ func (*SchedulerSuite) TestCancelOrphanedContainers(c *check.C) {
ents, _ := queue.Entries()
c.Check(ents, check.HasLen, 1)
- sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
// Sync shouldn't cancel the container because it might be
// running on the VM with state=="unknown".
diff --git a/lib/dispatchcloud/sshexecutor/executor.go b/lib/dispatchcloud/sshexecutor/executor.go
index c37169921c..3761c69922 100644
--- a/lib/dispatchcloud/sshexecutor/executor.go
+++ b/lib/dispatchcloud/sshexecutor/executor.go
@@ -18,6 +18,8 @@ import (
"golang.org/x/crypto/ssh"
)
+var ErrNoAddress = errors.New("instance has no address")
+
// New returns a new Executor, using the given target.
func New(t cloud.ExecutorTarget) *Executor {
return &Executor{target: t}
@@ -196,7 +198,7 @@ func (exr *Executor) TargetHostPort() (string, string) {
func (exr *Executor) setupSSHClient() (*ssh.Client, error) {
addr := net.JoinHostPort(exr.TargetHostPort())
if addr == ":" {
- return nil, errors.New("instance has no address")
+ return nil, ErrNoAddress
}
var receivedKey ssh.PublicKey
client, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{
diff --git a/lib/dispatchcloud/sshexecutor/executor_test.go b/lib/dispatchcloud/sshexecutor/executor_test.go
index b4afeafa82..95b29fa6ac 100644
--- a/lib/dispatchcloud/sshexecutor/executor_test.go
+++ b/lib/dispatchcloud/sshexecutor/executor_test.go
@@ -6,6 +6,7 @@ package sshexecutor
import (
"bytes"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -146,6 +147,7 @@ func (s *ExecutorSuite) TestExecute(c *check.C) {
exr.SetTargetPort("0")
_, _, err = exr.Execute(nil, command, nil)
c.Check(err, check.ErrorMatches, `.*connection refused.*`)
+ c.Check(errors.As(err, new(*net.OpError)), check.Equals, true)
// Use the test server's listening port.
exr.SetTargetPort(target.Port())
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
index fcb2cfb33b..ea2b98236f 100644
--- a/lib/dispatchcloud/test/queue.go
+++ b/lib/dispatchcloud/test/queue.go
@@ -22,7 +22,10 @@ type Queue struct {
// ChooseType will be called for each entry in Containers. It
// must not be nil.
- ChooseType func(*arvados.Container) (arvados.InstanceType, error)
+ ChooseType func(*arvados.Container) ([]arvados.InstanceType, error)
+
+ // Mimic railsapi implementation of MaxDispatchAttempts config
+ MaxDispatchAttempts int
Logger logrus.FieldLogger
@@ -133,7 +136,15 @@ func (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error
q.entries[uuid] = ent
for i, ctr := range q.Containers {
if ctr.UUID == uuid {
- q.Containers[i].State = to
+ if max := q.MaxDispatchAttempts; max > 0 && ctr.LockCount >= max && to == arvados.ContainerStateQueued {
+ q.Containers[i].State = arvados.ContainerStateCancelled
+ q.Containers[i].RuntimeStatus = map[string]interface{}{"error": fmt.Sprintf("Failed to start: lock_count == %d", ctr.LockCount)}
+ } else {
+ q.Containers[i].State = to
+ if to == arvados.ContainerStateLocked {
+ q.Containers[i].LockCount++
+ }
+ }
break
}
}
@@ -156,11 +167,12 @@ func (q *Queue) Update() error {
ent.Container = ctr
upd[ctr.UUID] = ent
} else {
- it, _ := q.ChooseType(&ctr)
+ types, _ := q.ChooseType(&ctr)
+ ctr.Mounts = nil
upd[ctr.UUID] = container.QueueEnt{
- Container: ctr,
- InstanceType: it,
- FirstSeenAt: time.Now(),
+ Container: ctr,
+ InstanceTypes: types,
+ FirstSeenAt: time.Now(),
}
}
}
diff --git a/lib/dispatchcloud/test/stub_driver.go b/lib/dispatchcloud/test/stub_driver.go
index f57db0f09f..2265be6e16 100644
--- a/lib/dispatchcloud/test/stub_driver.go
+++ b/lib/dispatchcloud/test/stub_driver.go
@@ -20,6 +20,7 @@ import (
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/crunchrun"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
@@ -33,7 +34,10 @@ type StubDriver struct {
// SetupVM, if set, is called upon creation of each new
// StubVM. This is the caller's opportunity to customize the
// VM's error rate and other behaviors.
- SetupVM func(*StubVM)
+ //
+ // If SetupVM returns an error, that error will be returned to
+ // the caller of Create(), and the new VM will be discarded.
+ SetupVM func(*StubVM) error
// Bugf, if set, is called if a bug is detected in the caller
// or stub. Typically set to (*check.C)Errorf. If unset,
@@ -45,7 +49,8 @@ type StubDriver struct {
Queue *Queue
// Frequency of artificially introduced errors on calls to
- // Destroy. 0=always succeed, 1=always fail.
+ // Create and Destroy. 0=always succeed, 1=always fail.
+ ErrorRateCreate float64
ErrorRateDestroy float64
// If Create() or Instances() is called too frequently, return
@@ -53,6 +58,8 @@ type StubDriver struct {
MinTimeBetweenCreateCalls time.Duration
MinTimeBetweenInstancesCalls time.Duration
+ QuotaMaxInstances int
+
// If true, Create and Destroy calls block until Release() is
// called.
HoldCloudOps bool
@@ -62,7 +69,7 @@ type StubDriver struct {
}
// InstanceSet returns a new *StubInstanceSet.
-func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {
if sd.holdCloudOps == nil {
sd.holdCloudOps = make(chan bool)
}
@@ -108,7 +115,7 @@ type StubInstanceSet struct {
lastInstanceID int
}
-func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, cmd cloud.InitCommand, authKey ssh.PublicKey) (cloud.Instance, error) {
+func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, initCommand cloud.InitCommand, authKey ssh.PublicKey) (cloud.Instance, error) {
if sis.driver.HoldCloudOps {
sis.driver.holdCloudOps <- true
}
@@ -120,6 +127,12 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
if sis.allowCreateCall.After(time.Now()) {
return nil, RateLimitError{sis.allowCreateCall}
}
+ if math_rand.Float64() < sis.driver.ErrorRateCreate {
+ return nil, fmt.Errorf("StubInstanceSet: rand < ErrorRateCreate %f", sis.driver.ErrorRateCreate)
+ }
+ if max := sis.driver.QuotaMaxInstances; max > 0 && len(sis.servers) >= max {
+ return nil, QuotaError{fmt.Errorf("StubInstanceSet: reached QuotaMaxInstances %d", max)}
+ }
sis.allowCreateCall = time.Now().Add(sis.driver.MinTimeBetweenCreateCalls)
ak := sis.driver.AuthorizedKeys
if authKey != nil {
@@ -127,11 +140,11 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
}
sis.lastInstanceID++
svm := &StubVM{
+ InitCommand: initCommand,
sis: sis,
id: cloud.InstanceID(fmt.Sprintf("inst%d,%s", sis.lastInstanceID, it.ProviderType)),
tags: copyTags(tags),
providerType: it.ProviderType,
- initCommand: cmd,
running: map[string]stubProcess{},
killing: map[string]bool{},
}
@@ -142,7 +155,10 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
Exec: svm.Exec,
}
if setup := sis.driver.SetupVM; setup != nil {
- setup(svm)
+ err := setup(svm)
+ if err != nil {
+ return nil, err
+ }
}
sis.servers[svm.id] = svm
return svm.Instance(), nil
@@ -171,11 +187,26 @@ func (sis *StubInstanceSet) Stop() {
sis.stopped = true
}
+func (sis *StubInstanceSet) StubVMs() (svms []*StubVM) {
+ sis.mtx.Lock()
+ defer sis.mtx.Unlock()
+ for _, vm := range sis.servers {
+ svms = append(svms, vm)
+ }
+ return
+}
+
type RateLimitError struct{ Retry time.Time }
func (e RateLimitError) Error() string { return fmt.Sprintf("rate limited until %s", e.Retry) }
func (e RateLimitError) EarliestRetry() time.Time { return e.Retry }
+type CapacityError struct{ InstanceTypeSpecific bool }
+
+func (e CapacityError) Error() string { return "insufficient capacity" }
+func (e CapacityError) IsCapacityError() bool { return true }
+func (e CapacityError) IsInstanceTypeSpecific() bool { return e.InstanceTypeSpecific }
+
// StubVM is a fake server that runs an SSH service. It represents a
// VM running in a fake cloud.
//
@@ -196,16 +227,20 @@ type StubVM struct {
CrashRunningContainer func(arvados.Container)
ExtraCrunchRunArgs string // extra args expected after "crunch-run --detach --stdin-config "
+ // Populated by (*StubInstanceSet)Create()
+ InitCommand cloud.InitCommand
+
sis *StubInstanceSet
id cloud.InstanceID
tags cloud.InstanceTags
- initCommand cloud.InitCommand
providerType string
SSHService SSHService
running map[string]stubProcess
killing map[string]bool
lastPID int64
deadlocked string
+ stubprocs sync.WaitGroup
+ destroying bool
sync.Mutex
}
@@ -234,6 +269,17 @@ func (svm *StubVM) Instance() stubInstance {
}
func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32 {
+ // Ensure we don't start any new stubprocs after Destroy()
+ // has started Wait()ing for stubprocs to end.
+ svm.Lock()
+ if svm.destroying {
+ svm.Unlock()
+ return 1
+ }
+ svm.stubprocs.Add(1)
+ defer svm.stubprocs.Done()
+ svm.Unlock()
+
stdinData, err := ioutil.ReadAll(stdin)
if err != nil {
fmt.Fprintf(stderr, "error reading stdin: %s\n", err)
@@ -271,7 +317,15 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
pid := svm.lastPID
svm.running[uuid] = stubProcess{pid: pid}
svm.Unlock()
+
time.Sleep(svm.CrunchRunDetachDelay)
+
+ svm.Lock()
+ defer svm.Unlock()
+ if svm.destroying {
+ fmt.Fprint(stderr, "crunch-run: killed by system shutdown\n")
+ return 9
+ }
fmt.Fprintf(stderr, "starting %s\n", uuid)
logger := svm.sis.logger.WithFields(logrus.Fields{
"Instance": svm.id,
@@ -279,13 +333,18 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
"PID": pid,
})
logger.Printf("[test] starting crunch-run stub")
+ svm.stubprocs.Add(1)
go func() {
+ defer svm.stubprocs.Done()
var ctr arvados.Container
var started, completed bool
defer func() {
logger.Print("[test] exiting crunch-run stub")
svm.Lock()
defer svm.Unlock()
+ if svm.destroying {
+ return
+ }
if svm.running[uuid].pid != pid {
bugf := svm.sis.driver.Bugf
if bugf == nil {
@@ -325,8 +384,10 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
svm.Lock()
killed := svm.killing[uuid]
+ delete(svm.killing, uuid)
+ destroying := svm.destroying
svm.Unlock()
- if killed || wantCrashEarly {
+ if killed || wantCrashEarly || destroying {
return
}
@@ -418,6 +479,10 @@ func (si stubInstance) Destroy() error {
if math_rand.Float64() < si.svm.sis.driver.ErrorRateDestroy {
return errors.New("instance could not be destroyed")
}
+ si.svm.Lock()
+ si.svm.destroying = true
+ si.svm.Unlock()
+ si.svm.stubprocs.Wait()
si.svm.SSHService.Close()
sis.mtx.Lock()
defer sis.mtx.Unlock()
@@ -470,3 +535,13 @@ func copyTags(src cloud.InstanceTags) cloud.InstanceTags {
}
return dst
}
+
+func (si stubInstance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice {
+ return nil
+}
+
+type QuotaError struct {
+ error
+}
+
+func (QuotaError) IsQuotaError() bool { return true }
diff --git a/lib/dispatchcloud/worker/pool.go b/lib/dispatchcloud/worker/pool.go
index 66e0bfee91..13c369d0c6 100644
--- a/lib/dispatchcloud/worker/pool.go
+++ b/lib/dispatchcloud/worker/pool.go
@@ -82,6 +82,9 @@ const (
// instances have been shutdown.
quotaErrorTTL = time.Minute
+ // Time after a capacity error to try again
+ capacityErrorTTL = time.Minute
+
// Time between "X failed because rate limiting" messages
logRateLimitErrorInterval = time.Second * 10
)
@@ -106,11 +109,13 @@ func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *promethe
newExecutor: newExecutor,
cluster: cluster,
bootProbeCommand: cluster.Containers.CloudVMs.BootProbeCommand,
+ instanceInitCommand: cloud.InitCommand(cluster.Containers.CloudVMs.InstanceInitCommand),
runnerSource: cluster.Containers.CloudVMs.DeployRunnerBinary,
imageID: cloud.ImageID(cluster.Containers.CloudVMs.ImageID),
instanceTypes: cluster.InstanceTypes,
maxProbesPerSecond: cluster.Containers.CloudVMs.MaxProbesPerSecond,
maxConcurrentInstanceCreateOps: cluster.Containers.CloudVMs.MaxConcurrentInstanceCreateOps,
+ maxInstances: cluster.Containers.CloudVMs.MaxInstances,
probeInterval: duration(cluster.Containers.CloudVMs.ProbeInterval, defaultProbeInterval),
syncInterval: duration(cluster.Containers.CloudVMs.SyncInterval, defaultSyncInterval),
timeoutIdle: duration(cluster.Containers.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
@@ -148,6 +153,7 @@ type Pool struct {
newExecutor func(cloud.Instance) Executor
cluster *arvados.Cluster
bootProbeCommand string
+ instanceInitCommand cloud.InitCommand
runnerSource string
imageID cloud.ImageID
instanceTypes map[string]arvados.InstanceType
@@ -155,6 +161,7 @@ type Pool struct {
probeInterval time.Duration
maxProbesPerSecond int
maxConcurrentInstanceCreateOps int
+ maxInstances int
timeoutIdle time.Duration
timeoutBooting time.Duration
timeoutProbe time.Duration
@@ -169,19 +176,21 @@ type Pool struct {
runnerArgs []string // extra args passed to crunch-run
// private state
- subscribers map[<-chan struct{}]chan<- struct{}
- creating map[string]createCall // unfinished (cloud.InstanceSet)Create calls (key is instance secret)
- workers map[cloud.InstanceID]*worker
- loaded bool // loaded list of instances from InstanceSet at least once
- exited map[string]time.Time // containers whose crunch-run proc has exited, but ForgetContainer has not been called
- atQuotaUntil time.Time
- atQuotaErr cloud.QuotaError
- stop chan bool
- mtx sync.RWMutex
- setupOnce sync.Once
- runnerData []byte
- runnerMD5 [md5.Size]byte
- runnerCmd string
+ subscribers map[<-chan struct{}]chan<- struct{}
+ creating map[string]createCall // unfinished (cloud.InstanceSet)Create calls (key is instance secret)
+ workers map[cloud.InstanceID]*worker
+ loaded bool // loaded list of instances from InstanceSet at least once
+ exited map[string]time.Time // containers whose crunch-run proc has exited, but ForgetContainer has not been called
+ atQuotaUntilFewerInstances int
+ atQuotaUntil time.Time
+ atQuotaErr cloud.QuotaError
+ atCapacityUntil map[string]time.Time
+ stop chan bool
+ mtx sync.RWMutex
+ setupOnce sync.Once
+ runnerData []byte
+ runnerMD5 [md5.Size]byte
+ runnerCmd string
mContainersRunning prometheus.Gauge
mInstances *prometheus.GaugeVec
@@ -195,6 +204,8 @@ type Pool struct {
mTimeFromShutdownToGone prometheus.Summary
mTimeFromQueueToCrunchRun prometheus.Summary
mRunProbeDuration *prometheus.SummaryVec
+ mProbeAgeMax prometheus.Gauge
+ mProbeAgeMedian prometheus.Gauge
}
type createCall struct {
@@ -302,10 +313,10 @@ func (wp *Pool) Unallocated() map[arvados.InstanceType]int {
// pool. The worker is added immediately; instance creation runs in
// the background.
//
-// Create returns false if a pre-existing error state prevents it from
-// even attempting to create a new instance. Those errors are logged
-// by the Pool, so the caller does not need to log anything in such
-// cases.
+// Create returns false if a pre-existing error or a configuration
+// setting prevents it from even attempting to create a new
+// instance. Those errors are logged by the Pool, so the caller does
+// not need to log anything in such cases.
func (wp *Pool) Create(it arvados.InstanceType) bool {
logger := wp.logger.WithField("InstanceType", it.Name)
wp.setupOnce.Do(wp.setup)
@@ -313,11 +324,11 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
// Boot probe is certain to fail.
return false
}
- wp.mtx.Lock()
- defer wp.mtx.Unlock()
- if time.Now().Before(wp.atQuotaUntil) || wp.instanceSet.throttleCreate.Error() != nil {
+ if wp.AtCapacity(it) || wp.AtQuota() || wp.instanceSet.throttleCreate.Error() != nil {
return false
}
+ wp.mtx.Lock()
+ defer wp.mtx.Unlock()
// The maxConcurrentInstanceCreateOps knob throttles the number of node create
// requests in flight. It was added to work around a limitation in Azure's
// managed disks, which support no more than 20 concurrent node creation
@@ -341,7 +352,7 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
wp.tagKeyPrefix + tagKeyIdleBehavior: string(IdleBehaviorRun),
wp.tagKeyPrefix + tagKeyInstanceSecret: secret,
}
- initCmd := TagVerifier{nil, secret, nil}.InitCommand()
+ initCmd := TagVerifier{nil, secret, nil}.InitCommand() + "\n" + wp.instanceInitCommand
inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
wp.mtx.Lock()
defer wp.mtx.Unlock()
@@ -352,8 +363,37 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
if err != nil {
if err, ok := err.(cloud.QuotaError); ok && err.IsQuotaError() {
wp.atQuotaErr = err
- wp.atQuotaUntil = time.Now().Add(quotaErrorTTL)
- time.AfterFunc(quotaErrorTTL, wp.notify)
+ n := len(wp.workers) + len(wp.creating) - 1
+ if n < 1 {
+ // Quota error with no
+ // instances running --
+ // nothing to do but wait
+ wp.atQuotaUntilFewerInstances = 0
+ wp.atQuotaUntil = time.Now().Add(quotaErrorTTL)
+ time.AfterFunc(quotaErrorTTL, wp.notify)
+ logger.WithField("atQuotaUntil", wp.atQuotaUntil).Info("quota error with 0 running -- waiting for quotaErrorTTL")
+ } else if n < wp.atQuotaUntilFewerInstances || wp.atQuotaUntilFewerInstances == 0 {
+ // Quota error with N
+ // instances running -- report
+ // AtQuota until some
+ // instances shut down
+ wp.atQuotaUntilFewerInstances = n
+ wp.atQuotaUntil = time.Time{}
+ logger.WithField("atQuotaUntilFewerInstances", n).Info("quota error -- waiting for next instance shutdown")
+ }
+ }
+ if err, ok := err.(cloud.CapacityError); ok && err.IsCapacityError() {
+ capKey := it.ProviderType
+ if !err.IsInstanceTypeSpecific() {
+ // set capacity flag for all
+ // instance types
+ capKey = ""
+ }
+ if wp.atCapacityUntil == nil {
+ wp.atCapacityUntil = map[string]time.Time{}
+ }
+ wp.atCapacityUntil[capKey] = time.Now().Add(capacityErrorTTL)
+ time.AfterFunc(capacityErrorTTL, wp.notify)
}
logger.WithError(err).Error("create failed")
wp.instanceSet.throttleCreate.CheckRateLimitError(err, wp.logger, "create instance", wp.notify)
@@ -361,15 +401,37 @@ func (wp *Pool) Create(it arvados.InstanceType) bool {
}
wp.updateWorker(inst, it)
}()
+ if len(wp.creating)+len(wp.workers) == wp.maxInstances {
+ logger.Infof("now at MaxInstances limit of %d instances", wp.maxInstances)
+ }
return true
}
+// AtCapacity returns true if Create() is currently expected to fail
+// for the given instance type.
+func (wp *Pool) AtCapacity(it arvados.InstanceType) bool {
+ wp.mtx.Lock()
+ defer wp.mtx.Unlock()
+ if t, ok := wp.atCapacityUntil[it.ProviderType]; ok && time.Now().Before(t) {
+ // at capacity for this instance type
+ return true
+ }
+ if t, ok := wp.atCapacityUntil[""]; ok && time.Now().Before(t) {
+ // at capacity for all instance types
+ return true
+ }
+ return false
+}
+
// AtQuota returns true if Create is not expected to work at the
-// moment.
+// moment (e.g., cloud provider has reported quota errors, or we are
+// already at our own configured quota).
func (wp *Pool) AtQuota() bool {
wp.mtx.Lock()
defer wp.mtx.Unlock()
- return time.Now().Before(wp.atQuotaUntil)
+ return wp.atQuotaUntilFewerInstances > 0 ||
+ time.Now().Before(wp.atQuotaUntil) ||
+ (wp.maxInstances > 0 && wp.maxInstances <= len(wp.workers)+len(wp.creating))
}
// SetIdleBehavior determines how the indicated instance will behave
@@ -389,10 +451,15 @@ func (wp *Pool) SetIdleBehavior(id cloud.InstanceID, idleBehavior IdleBehavior)
func (wp *Pool) reportSSHConnected(inst cloud.Instance) {
wp.mtx.Lock()
defer wp.mtx.Unlock()
- wkr := wp.workers[inst.ID()]
+ wkr, ok := wp.workers[inst.ID()]
+ if !ok {
+ // race: inst was removed from the pool
+ return
+ }
if wkr.state != StateBooting || !wkr.firstSSHConnection.IsZero() {
- // the node is not in booting state (can happen if a-d-c is restarted) OR
- // this is not the first SSH connection
+ // the node is not in booting state (can happen if
+ // a-d-c is restarted) OR this is not the first SSH
+ // connection
return
}
@@ -613,6 +680,20 @@ func (wp *Pool) registerMetrics(reg *prometheus.Registry) {
Help: "Number of containers reported running by cloud VMs.",
})
reg.MustRegister(wp.mContainersRunning)
+ wp.mProbeAgeMax = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "probe_age_seconds_max",
+ Help: "Maximum number of seconds since an instance's most recent successful probe.",
+ })
+ reg.MustRegister(wp.mProbeAgeMax)
+ wp.mProbeAgeMedian = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "probe_age_seconds_median",
+ Help: "Median number of seconds since an instance's most recent successful probe.",
+ })
+ reg.MustRegister(wp.mProbeAgeMedian)
wp.mInstances = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "arvados",
Subsystem: "dispatchcloud",
@@ -725,6 +806,8 @@ func (wp *Pool) updateMetrics() {
cpu := map[string]int64{}
mem := map[string]int64{}
var running int64
+ now := time.Now()
+ var probed []time.Time
for _, wkr := range wp.workers {
var cat string
switch {
@@ -744,6 +827,7 @@ func (wp *Pool) updateMetrics() {
cpu[cat] += int64(wkr.instType.VCPUs)
mem[cat] += int64(wkr.instType.RAM)
running += int64(len(wkr.running) + len(wkr.starting))
+ probed = append(probed, wkr.probed)
}
for _, cat := range []string{"inuse", "hold", "booting", "unknown", "idle"} {
wp.mInstancesPrice.WithLabelValues(cat).Set(price[cat])
@@ -760,6 +844,15 @@ func (wp *Pool) updateMetrics() {
wp.mInstances.WithLabelValues(k.cat, k.instType).Set(float64(v))
}
wp.mContainersRunning.Set(float64(running))
+
+ if len(probed) == 0 {
+ wp.mProbeAgeMax.Set(0)
+ wp.mProbeAgeMedian.Set(0)
+ } else {
+ sort.Slice(probed, func(i, j int) bool { return probed[i].Before(probed[j]) })
+ wp.mProbeAgeMax.Set(now.Sub(probed[0]).Seconds())
+ wp.mProbeAgeMedian.Set(now.Sub(probed[len(probed)/2]).Seconds())
+ }
}
func (wp *Pool) runProbes() {
@@ -865,6 +958,9 @@ func (wp *Pool) Instances() []InstanceView {
// KillInstance destroys a cloud VM instance. It returns an error if
// the given instance does not exist.
func (wp *Pool) KillInstance(id cloud.InstanceID, reason string) error {
+ wp.setupOnce.Do(wp.setup)
+ wp.mtx.Lock()
+ defer wp.mtx.Unlock()
wkr, ok := wp.workers[id]
if !ok {
return errors.New("instance not found")
@@ -986,6 +1082,14 @@ func (wp *Pool) sync(threshold time.Time, instances []cloud.Instance) {
notify = true
}
+ if wp.atQuotaUntilFewerInstances > len(wp.workers)+len(wp.creating) {
+ // After syncing, there are fewer instances (including
+ // pending creates) than there were last time we saw a
+ // quota error. This might mean it's now possible to
+ // create new instances. Reset our "at quota" state.
+ wp.atQuotaUntilFewerInstances = 0
+ }
+
if !wp.loaded {
notify = true
wp.loaded = true
diff --git a/lib/dispatchcloud/worker/pool_test.go b/lib/dispatchcloud/worker/pool_test.go
index 7b5634605f..8d2ba09ebe 100644
--- a/lib/dispatchcloud/worker/pool_test.go
+++ b/lib/dispatchcloud/worker/pool_test.go
@@ -78,7 +78,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
driver := &test.StubDriver{}
instanceSetID := cloud.InstanceSetID("test-instance-set-id")
- is, err := driver.InstanceSet(nil, instanceSetID, nil, suite.logger)
+ is, err := driver.InstanceSet(nil, instanceSetID, nil, suite.logger, nil)
c.Assert(err, check.IsNil)
newExecutor := func(cloud.Instance) Executor {
@@ -157,7 +157,7 @@ func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
func (suite *PoolSuite) TestDrain(c *check.C) {
driver := test.StubDriver{}
- instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger)
+ instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
ac := arvados.NewClientFromEnv()
@@ -210,7 +210,7 @@ func (suite *PoolSuite) TestDrain(c *check.C) {
func (suite *PoolSuite) TestNodeCreateThrottle(c *check.C) {
driver := test.StubDriver{HoldCloudOps: true}
- instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger)
+ instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
type1 := test.InstanceType(1)
@@ -250,7 +250,7 @@ func (suite *PoolSuite) TestNodeCreateThrottle(c *check.C) {
func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
driver := test.StubDriver{HoldCloudOps: true}
- instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger)
+ instanceSet, err := driver.InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
type1 := arvados.InstanceType{Name: "a1s", ProviderType: "a1.small", VCPUs: 1, RAM: 1 * GiB, Price: .01}
@@ -266,6 +266,7 @@ func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
type2.Name: type2,
type3.Name: type3,
},
+ instanceInitCommand: "echo 'instance init command goes here'",
}
notify := pool.Subscribe()
defer pool.Unsubscribe(notify)
@@ -294,6 +295,9 @@ func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
return len(pool.workers) == 4
})
+ vms := instanceSet.(*test.StubInstanceSet).StubVMs()
+ c.Check(string(vms[0].InitCommand), check.Matches, `umask 0177 && echo -n "[0-9a-f]+" >/var/run/arvados-instance-secret\necho 'instance init command goes here'`)
+
// Place type3 node on admin-hold
ivs := suite.instancesByType(pool, type3)
c.Assert(ivs, check.HasLen, 1)
diff --git a/lib/dispatchcloud/worker/runner.go b/lib/dispatchcloud/worker/runner.go
index 29c4b8e0a3..f22b8922ad 100644
--- a/lib/dispatchcloud/worker/runner.go
+++ b/lib/dispatchcloud/worker/runner.go
@@ -63,6 +63,9 @@ func newRemoteRunner(uuid string, wkr *worker) *remoteRunner {
configData.Cluster = wkr.wp.cluster
configData.KeepBuffers = bufs * wkr.instType.VCPUs
}
+ if wkr.wp.cluster.Containers.CloudVMs.Driver == "ec2" && wkr.instType.Preemptible {
+ configData.EC2SpotCheck = true
+ }
configJSON, err := json.Marshal(configData)
if err != nil {
panic(err)
@@ -135,7 +138,7 @@ func (rr *remoteRunner) Kill(reason string) {
termDeadline := time.Now().Add(rr.timeoutTERM)
t := time.NewTicker(rr.timeoutSignal)
defer t.Stop()
- for range t.C {
+ for ; ; <-t.C {
switch {
case rr.isClosed():
return
diff --git a/lib/dispatchcloud/worker/worker.go b/lib/dispatchcloud/worker/worker.go
index b01a820cd6..10a28157e4 100644
--- a/lib/dispatchcloud/worker/worker.go
+++ b/lib/dispatchcloud/worker/worker.go
@@ -6,16 +6,22 @@ package worker
import (
"bytes"
+ "encoding/json"
+ "errors"
"fmt"
+ "io"
+ "net"
"path/filepath"
"strings"
"sync"
"time"
"git.arvados.org/arvados.git/lib/cloud"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/stats"
"github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
)
const (
@@ -182,6 +188,14 @@ func (wkr *worker) startContainer(ctr arvados.Container) {
}
wkr.mtx.Lock()
defer wkr.mtx.Unlock()
+ if wkr.starting[ctr.UUID] != rr {
+ // Someone else (e.g., wkr.probeAndUpdate() ->
+ // wkr.updateRunning() or wkr.Close()) already
+ // moved our runner from wkr.starting to
+ // wkr.running or deleted it while we were in
+ // rr.Start().
+ return
+ }
now := time.Now()
wkr.updated = now
wkr.busy = now
@@ -234,6 +248,7 @@ func (wkr *worker) probeAndUpdate() {
ctrUUIDs []string
ok bool
stderr []byte // from probeBooted
+ errLast error // from probeBooted or copyRunnerData
)
switch initialState {
@@ -250,20 +265,33 @@ func (wkr *worker) probeAndUpdate() {
logger := wkr.logger.WithField("ProbeStart", probeStart)
if !booted {
- booted, stderr = wkr.probeBooted()
+ stderr, errLast = wkr.probeBooted()
+ booted = errLast == nil
+ shouldCopy := booted || initialState == StateUnknown
if !booted {
// Pretend this probe succeeded if another
// concurrent attempt succeeded.
wkr.mtx.Lock()
- booted = wkr.state == StateRunning || wkr.state == StateIdle
+ if wkr.state == StateRunning || wkr.state == StateIdle {
+ booted = true
+ shouldCopy = false
+ }
wkr.mtx.Unlock()
}
+ if shouldCopy {
+ _, stderrCopy, err := wkr.copyRunnerData()
+ if err != nil {
+ booted = false
+ wkr.logger.WithError(err).WithField("stderr", string(stderrCopy)).Warn("error copying runner binary")
+ errLast = err
+ }
+ }
if booted {
logger.Info("instance booted; will try probeRunning")
}
}
reportedBroken := false
- if booted || wkr.state == StateUnknown {
+ if booted || initialState == StateUnknown {
ctrUUIDs, reportedBroken, ok = wkr.probeRunning()
}
wkr.mtx.Lock()
@@ -288,17 +316,17 @@ func (wkr *worker) probeAndUpdate() {
dur := probeStart.Sub(wkr.probed)
if wkr.shutdownIfBroken(dur) {
// stderr from failed run-probes will have
- // been logged already, but boot-probe
+ // been logged already, but some boot-probe
// failures are normal so they are logged only
- // at Debug level. This is our chance to log
- // some evidence about why the node never
+ // at Debug level. This may be our chance to
+ // log some evidence about why the node never
// booted, even in non-debug mode.
if !booted {
wkr.reportBootOutcome(BootOutcomeFailed)
logger.WithFields(logrus.Fields{
"Duration": dur,
"stderr": string(stderr),
- }).Info("boot failed")
+ }).WithError(errLast).Info("boot failed")
}
}
return
@@ -381,7 +409,12 @@ func (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {
cmd = "sudo " + cmd
}
before := time.Now()
- stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
+ var stdin io.Reader
+ if prices := wkr.instance.PriceHistory(wkr.instType); len(prices) > 0 {
+ j, _ := json.Marshal(prices)
+ stdin = bytes.NewReader(j)
+ }
+ stdout, stderr, err := wkr.executor.Execute(nil, cmd, stdin)
if err != nil {
wkr.logger.WithFields(logrus.Fields{
"Command": cmd,
@@ -444,7 +477,7 @@ func (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {
return
}
-func (wkr *worker) probeBooted() (ok bool, stderr []byte) {
+func (wkr *worker) probeBooted() (stderr []byte, err error) {
cmd := wkr.wp.bootProbeCommand
if cmd == "" {
cmd = "true"
@@ -456,25 +489,41 @@ func (wkr *worker) probeBooted() (ok bool, stderr []byte) {
"stderr": string(stderr),
})
if err != nil {
- logger.WithError(err).Debug("boot probe failed")
- return false, stderr
+ if errors.Is(err, sshexecutor.ErrNoAddress) ||
+ errors.As(err, new(*net.OpError)) ||
+ errors.As(err, new(*ssh.ExitError)) {
+ // These errors are expected while the
+ // instance is booting, so we only log them at
+ // debug level.
+ logger.WithError(err).Debug("boot probe failed")
+ } else {
+ // Other errors are more likely to indicate a
+ // configuration problem, and it's more
+ // sysadmin-friendly to show them right away
+ // instead of waiting until boot timeout and
+ // only showing the last error.
+ //
+ // Example: "ssh: handshake failed: ssh:
+ // unable to authenticate, attempted methods
+ // [none publickey], no supported methods
+ // remain"
+ logger.WithError(err).Warn("boot probe failed")
+ }
+ return stderr, err
}
logger.Info("boot probe succeeded")
+ return stderr, nil
+}
+
+func (wkr *worker) copyRunnerData() (stdout, stderr []byte, err error) {
if err = wkr.wp.loadRunnerData(); err != nil {
wkr.logger.WithError(err).Warn("cannot boot worker: error loading runner binary")
- return false, stderr
+ return
} else if len(wkr.wp.runnerData) == 0 {
// Assume crunch-run is already installed
- } else if _, stderr2, err := wkr.copyRunnerData(); err != nil {
- wkr.logger.WithError(err).WithField("stderr", string(stderr2)).Warn("error copying runner binary")
- return false, stderr2
- } else {
- stderr = append(stderr, stderr2...)
+ return
}
- return true, stderr
-}
-func (wkr *worker) copyRunnerData() (stdout, stderr []byte, err error) {
hash := fmt.Sprintf("%x", wkr.wp.runnerMD5)
dstdir, _ := filepath.Split(wkr.wp.runnerCmd)
logger := wkr.logger.WithFields(logrus.Fields{
@@ -506,9 +555,11 @@ func (wkr *worker) shutdownIfBroken(dur time.Duration) bool {
// Never shut down.
return false
}
- label, threshold := "", wkr.wp.timeoutProbe
+ prologue, epilogue, threshold := "", "", wkr.wp.timeoutProbe
if wkr.state == StateUnknown || wkr.state == StateBooting {
- label, threshold = "new ", wkr.wp.timeoutBooting
+ prologue = "new "
+ epilogue = " -- `arvados-server cloudtest` might help troubleshoot, see https://doc.arvados.org/main/admin/cloudtest.html"
+ threshold = wkr.wp.timeoutBooting
}
if dur < threshold {
return false
@@ -517,7 +568,7 @@ func (wkr *worker) shutdownIfBroken(dur time.Duration) bool {
"Duration": dur,
"Since": wkr.probed,
"State": wkr.state,
- }).Warnf("%sinstance unresponsive, shutting down", label)
+ }).Warnf("%sinstance unresponsive, shutting down%s", prologue, epilogue)
wkr.shutdown()
return true
}
@@ -624,10 +675,12 @@ func (wkr *worker) Close() {
for uuid, rr := range wkr.running {
wkr.logger.WithField("ContainerUUID", uuid).Info("crunch-run process abandoned")
rr.Close()
+ delete(wkr.running, uuid)
}
for uuid, rr := range wkr.starting {
wkr.logger.WithField("ContainerUUID", uuid).Info("crunch-run process abandoned")
rr.Close()
+ delete(wkr.starting, uuid)
}
}
diff --git a/lib/dispatchcloud/worker/worker_test.go b/lib/dispatchcloud/worker/worker_test.go
index 2ee6b7c362..5d8c67e916 100644
--- a/lib/dispatchcloud/worker/worker_test.go
+++ b/lib/dispatchcloud/worker/worker_test.go
@@ -43,7 +43,7 @@ func (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {
probeTimeout := time.Second
ac := arvados.NewClientFromEnv()
- is, err := (&test.StubDriver{}).InstanceSet(nil, "test-instance-set-id", nil, suite.logger)
+ is, err := (&test.StubDriver{}).InstanceSet(nil, "test-instance-set-id", nil, suite.logger, nil)
c.Assert(err, check.IsNil)
inst, err := is.Create(arvados.InstanceType{}, "", nil, "echo InitCommand", nil)
c.Assert(err, check.IsNil)
@@ -122,6 +122,39 @@ func (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {
expectState: StateUnknown,
expectRunning: 1,
},
+ {
+ testCaseComment: "Unknown, boot probe fails, deployRunner succeeds, container is running",
+ state: StateUnknown,
+ respBoot: respFail,
+ respRun: respFail,
+ respRunDeployed: respContainerRunning,
+ deployRunner: []byte("ELF"),
+ expectStdin: []byte("ELF"),
+ expectState: StateUnknown,
+ expectRunning: 1,
+ },
+ {
+ testCaseComment: "Unknown, boot timeout exceeded, boot probe fails but deployRunner succeeds and container is running",
+ state: StateUnknown,
+ age: bootTimeout * 2,
+ respBoot: respFail,
+ respRun: respFail,
+ respRunDeployed: respContainerRunning,
+ deployRunner: []byte("ELF"),
+ expectStdin: []byte("ELF"),
+ expectState: StateUnknown,
+ expectRunning: 1,
+ },
+ {
+ testCaseComment: "Unknown, boot timeout exceeded, boot probe fails but deployRunner succeeds and no container is running",
+ state: StateUnknown,
+ age: bootTimeout * 2,
+ respBoot: respFail,
+ respRun: respFail,
+ deployRunner: []byte("ELF"),
+ expectStdin: []byte("ELF"),
+ expectState: StateShutdown,
+ },
{
testCaseComment: "Booting, boot probe fails, run probe fails",
state: StateBooting,
diff --git a/lib/install/arvados.service b/lib/install/arvados.service
index 3b68f31e9f..f536001f77 100644
--- a/lib/install/arvados.service
+++ b/lib/install/arvados.service
@@ -7,8 +7,6 @@ Description=Arvados server
Documentation=https://doc.arvados.org/
After=network.target
AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
StartLimitIntervalSec=0
[Service]
@@ -21,8 +19,5 @@ Restart=always
RestartSec=1
LimitNOFILE=65536
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
[Install]
WantedBy=multi-user.target
diff --git a/lib/install/arvadostest_docker_build.sh b/lib/install/arvadostest_docker_build.sh
index e0defa888a..3f0245293e 100755
--- a/lib/install/arvadostest_docker_build.sh
+++ b/lib/install/arvadostest_docker_build.sh
@@ -1,4 +1,8 @@
#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
set -ex -o pipefail
@@ -7,7 +11,7 @@ SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
ctrname=arvadostest
ctrbase=${ctrname}
if [[ "${1}" != "--update" ]] || ! docker images --format={{.Repository}} | grep -x ${ctrbase}; then
- ctrbase=debian:10
+ ctrbase=debian:11
fi
if docker ps -a --format={{.Names}} | grep -x ${ctrname}; then
diff --git a/lib/install/deps.go b/lib/install/deps.go
index e02c3743e7..146c645eca 100644
--- a/lib/install/deps.go
+++ b/lib/install/deps.go
@@ -17,6 +17,7 @@ import (
"os/exec"
"os/user"
"path/filepath"
+ "regexp"
"runtime"
"strconv"
"strings"
@@ -30,28 +31,32 @@ import (
var Command cmd.Handler = &installCommand{}
-const goversion = "1.17.7"
+const goversion = "1.20.6"
const (
- rubyversion = "2.7.5"
- bundlerversion = "2.2.19"
- singularityversion = "3.9.9"
- pjsversion = "1.9.8"
- geckoversion = "0.24.0"
- gradleversion = "5.3.1"
- nodejsversion = "v12.22.11"
- devtestDatabasePassword = "insecure_arvados_test"
- workbench2version = "2454ac35292a79594c32a80430740317ed5005cf"
+ defaultRubyVersion = "3.2.2"
+ defaultBundlerVersion = "~> 2.4.0"
+ defaultSingularityVersion = "3.10.4"
+ pjsversion = "1.9.8"
+ geckoversion = "0.24.0"
+ gradleversion = "5.3.1"
+ defaultNodejsVersion = "14.21.3"
+ devtestDatabasePassword = "insecure_arvados_test"
)
//go:embed arvados.service
var arvadosServiceFile []byte
type installCommand struct {
- ClusterType string
- SourcePath string
- PackageVersion string
- EatMyData bool
+ ClusterType string
+ SourcePath string
+ Commit string
+ PackageVersion string
+ RubyVersion string
+ BundlerVersion string
+ SingularityVersion string
+ NodejsVersion string
+ EatMyData bool
}
func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
@@ -72,7 +77,12 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
flags.StringVar(&inst.ClusterType, "type", "production", "cluster `type`: development, test, production, or package")
flags.StringVar(&inst.SourcePath, "source", "/arvados", "source tree location (required for -type=package)")
+ flags.StringVar(&inst.Commit, "commit", "", "source commit `hash` to embed (blank means use 'git log' or all-zero placeholder)")
flags.StringVar(&inst.PackageVersion, "package-version", "0.0.0", "version string to embed in executable files")
+ flags.StringVar(&inst.RubyVersion, "ruby-version", defaultRubyVersion, "Ruby `version` to install (do not override in production mode)")
+ flags.StringVar(&inst.BundlerVersion, "bundler-version", defaultBundlerVersion, "Bundler `version` to install (do not override in production mode)")
+ flags.StringVar(&inst.SingularityVersion, "singularity-version", defaultSingularityVersion, "Singularity `version` to install (do not override in production mode)")
+ flags.StringVar(&inst.NodejsVersion, "nodejs-version", defaultNodejsVersion, "Nodejs `version` to install (not applicable in production mode)")
flags.BoolVar(&inst.EatMyData, "eatmydata", false, "use eatmydata to speed up install")
if ok, code := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
@@ -81,6 +91,14 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
}
+ if inst.Commit == "" {
+ if commit, err := exec.Command("env", "-C", inst.SourcePath, "git", "log", "-n1", "--format=%H").CombinedOutput(); err == nil {
+ inst.Commit = strings.TrimSpace(string(commit))
+ } else {
+ inst.Commit = "0000000000000000000000000000000000000000"
+ }
+ }
+
var dev, test, prod, pkg bool
switch inst.ClusterType {
case "development":
@@ -101,6 +119,23 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
return 1
}
+ if ok, _ := regexp.MatchString(`^\d\.\d+\.\d+$`, inst.RubyVersion); !ok {
+ fmt.Fprintf(stderr, "invalid argument %q for -ruby-version\n", inst.RubyVersion)
+ return 2
+ }
+ if ok, _ := regexp.MatchString(`^ *(|~>|[<>!=]=) *\d`, inst.BundlerVersion); !ok {
+ fmt.Fprintf(stderr, "invalid argument %q for -bundler-version\n", inst.BundlerVersion)
+ return 2
+ }
+ if ok, _ := regexp.MatchString(`^\d`, inst.SingularityVersion); !ok {
+ fmt.Fprintf(stderr, "invalid argument %q for -singularity-version\n", inst.SingularityVersion)
+ return 2
+ }
+ if ok, _ := regexp.MatchString(`^\d`, inst.NodejsVersion); !ok {
+ fmt.Fprintf(stderr, "invalid argument %q for -nodejs-version\n", inst.NodejsVersion)
+ return 2
+ }
+
osv, err := identifyOS()
if err != nil {
return 1
@@ -155,19 +190,18 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
"default-jre-headless",
"gettext",
"libattr1-dev",
- "libcrypt-ssleay-perl",
+ "libffi-dev",
"libfuse-dev",
"libgbm1", // cypress / workbench2 tests
"libgnutls28-dev",
- "libjson-perl",
"libpam-dev",
"libpcre3-dev",
"libpq-dev",
"libreadline-dev",
"libssl-dev",
- "libwww-perl",
"libxml2-dev",
"libxslt1-dev",
+ "libyaml-dev",
"linkchecker",
"lsof",
"make",
@@ -196,22 +230,30 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
if test {
if osv.Debian && osv.Major <= 10 {
pkgs = append(pkgs, "iceweasel")
+ } else if osv.Debian && osv.Major >= 11 {
+ pkgs = append(pkgs, "firefox-esr")
} else {
pkgs = append(pkgs, "firefox")
}
+ if osv.Debian && osv.Major >= 11 {
+ // not available in Debian <11
+ pkgs = append(pkgs, "s3cmd")
+ }
}
if dev || test {
- pkgs = append(pkgs, "squashfs-tools") // for singularity
- pkgs = append(pkgs, "gnupg") // for docker install recipe
+ pkgs = append(pkgs,
+ "libglib2.0-dev", // singularity (conmon)
+ "libseccomp-dev", // singularity (seccomp)
+ "squashfs-tools", // singularity
+ "gnupg") // docker install recipe
}
switch {
- case osv.Debian && osv.Major >= 11:
- pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev", "perl-modules-5.32")
- case osv.Debian && osv.Major >= 10:
- pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev", "perl-modules")
+ case osv.Debian && osv.Major >= 10,
+ osv.Ubuntu && osv.Major >= 22:
+ pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev")
case osv.Debian || osv.Ubuntu:
- pkgs = append(pkgs, "g++", "libcurl3", "libcurl3-openssl-dev", "perl-modules")
- case osv.Centos:
+ pkgs = append(pkgs, "g++", "libcurl3", "libcurl3-openssl-dev")
+ case osv.RedHat:
pkgs = append(pkgs, "gcc", "gcc-c++", "libcurl-devel", "postgresql-devel")
}
cmd := exec.CommandContext(ctx, "apt-get")
@@ -230,15 +272,15 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
}
if dev || test {
- if havedockerversion, err := exec.Command("docker", "--version").CombinedOutput(); err == nil {
+ if havedockerversion, err2 := exec.Command("docker", "--version").CombinedOutput(); err2 == nil {
logger.Printf("%s installed, assuming that version is ok", bytes.TrimSuffix(havedockerversion, []byte("\n")))
} else if osv.Debian {
var codename string
switch osv.Major {
- case 10:
- codename = "buster"
case 11:
codename = "bullseye"
+ case 12:
+ codename = "bookworm"
default:
err = fmt.Errorf("don't know how to install docker-ce for debian %d", osv.Major)
return 1
@@ -246,7 +288,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
err = inst.runBash(`
rm -f /usr/share/keyrings/docker-archive-keyring.gpg
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
-echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ `+codename+` stable' | \
+echo 'deb [arch=`+runtime.GOARCH+` signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ `+codename+` stable' | \
tee /etc/apt/sources.list.d/docker.list
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get --yes --no-install-recommends install docker-ce
@@ -258,6 +300,21 @@ DEBIAN_FRONTEND=noninteractive apt-get --yes --no-install-recommends install doc
err = fmt.Errorf("don't know how to install docker for osversion %v", osv)
return 1
}
+
+ err = inst.runBash(`
+key=fs.inotify.max_user_watches
+min=524288
+if [[ "$(sysctl --values "${key}")" -lt "${min}" ]]; then
+ sysctl "${key}=${min}"
+ # writing sysctl worked, so we should make it permanent
+ echo "${key}=${min}" | tee -a /etc/sysctl.conf
+ sysctl -p
+fi
+`, stdout, stderr)
+ if err != nil {
+ err = fmt.Errorf("couldn't set fs.inotify.max_user_watches value. (Is this a docker container? Fix this on the docker host by adding fs.inotify.max_user_watches=524288 to /etc/sysctl.conf and running `sysctl -p`)")
+ return 1
+ }
}
os.Mkdir("/var/lib/arvados", 0755)
@@ -276,19 +333,25 @@ DEBIAN_FRONTEND=noninteractive apt-get --yes --no-install-recommends install doc
return 1
}
}
- rubymajorversion := rubyversion[:strings.LastIndex(rubyversion, ".")]
- if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+rubyversion)) {
- logger.Print("ruby " + rubyversion + " already installed")
+ rubyminorversion := inst.RubyVersion[:strings.LastIndex(inst.RubyVersion, ".")]
+ if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+inst.RubyVersion)) {
+ logger.Print("ruby " + inst.RubyVersion + " already installed")
} else {
err = inst.runBash(`
+rubyversion="`+inst.RubyVersion+`"
+rubyminorversion="`+rubyminorversion+`"
tmp="$(mktemp -d)"
trap 'rm -r "${tmp}"' ERR EXIT
-wget --progress=dot:giga -O- https://cache.ruby-lang.org/pub/ruby/`+rubymajorversion+`/ruby-`+rubyversion+`.tar.gz | tar -C "${tmp}" -xzf -
-cd "${tmp}/ruby-`+rubyversion+`"
+wget --progress=dot:giga -O- "https://cache.ruby-lang.org/pub/ruby/$rubyminorversion/ruby-$rubyversion.tar.gz" | tar -C "${tmp}" -xzf -
+cd "${tmp}/ruby-$rubyversion"
./configure --disable-install-static-library --enable-shared --disable-install-doc --prefix /var/lib/arvados
make -j8
+rm -f /var/lib/arvados/bin/erb
make install
-/var/lib/arvados/bin/gem install bundler --no-document
+if [[ "$rubyversion" > "3" ]]; then
+ /var/lib/arvados/bin/gem update --no-document --system 3.4.21
+fi
+/var/lib/arvados/bin/gem install --conservative --no-document --version '`+inst.BundlerVersion+`' bundler
`, stdout, stderr)
if err != nil {
return 1
@@ -302,7 +365,7 @@ make install
err = inst.runBash(`
cd /tmp
rm -rf /var/lib/arvados/go/
-wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
+wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-`+runtime.GOARCH+`.tar.gz | tar -C /var/lib/arvados -xzf -
ln -sfv /var/lib/arvados/go/bin/* /usr/local/bin/
`, stdout, stderr)
if err != nil {
@@ -312,32 +375,6 @@ ln -sfv /var/lib/arvados/go/bin/* /usr/local/bin/
}
if !prod && !pkg {
- if havepjsversion, err := exec.Command("/usr/local/bin/phantomjs", "--version").CombinedOutput(); err == nil && string(havepjsversion) == "1.9.8\n" {
- logger.Print("phantomjs " + pjsversion + " already installed")
- } else {
- err = inst.runBash(`
-PJS=phantomjs-`+pjsversion+`-linux-x86_64
-wget --progress=dot:giga -O- https://cache.arvados.org/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
-ln -sfv /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
-`, stdout, stderr)
- if err != nil {
- return 1
- }
- }
-
- if havegeckoversion, err := exec.Command("/usr/local/bin/geckodriver", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegeckoversion), " "+geckoversion+" ") {
- logger.Print("geckodriver " + geckoversion + " already installed")
- } else {
- err = inst.runBash(`
-GD=v`+geckoversion+`
-wget --progress=dot:giga -O- https://github.com/mozilla/geckodriver/releases/download/$GD/geckodriver-$GD-linux64.tar.gz | tar -C /var/lib/arvados/bin -xzf - geckodriver
-ln -sfv /var/lib/arvados/bin/geckodriver /usr/local/bin/
-`, stdout, stderr)
- if err != nil {
- return 1
- }
- }
-
if havegradleversion, err := exec.Command("/usr/local/bin/gradle", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegradleversion), "Gradle "+gradleversion+"\n") {
logger.Print("gradle " + gradleversion + " already installed")
} else {
@@ -355,15 +392,15 @@ rm ${zip}
}
}
- if havesingularityversion, err := exec.Command("/var/lib/arvados/bin/singularity", "--version").CombinedOutput(); err == nil && strings.Contains(string(havesingularityversion), singularityversion) {
- logger.Print("singularity " + singularityversion + " already installed")
+ if havesingularityversion, err := exec.Command("/var/lib/arvados/bin/singularity", "--version").CombinedOutput(); err == nil && strings.Contains(string(havesingularityversion), inst.SingularityVersion) {
+ logger.Print("singularity " + inst.SingularityVersion + " already installed")
} else if dev || test {
err = inst.runBash(`
-S=`+singularityversion+`
+S=`+inst.SingularityVersion+`
tmp=/var/lib/arvados/tmp/singularity
trap "rm -r ${tmp}" ERR EXIT
cd /var/lib/arvados/tmp
-git clone https://github.com/sylabs/singularity
+git clone --recurse-submodules https://github.com/sylabs/singularity
cd singularity
git checkout v${S}
./mconfig --prefix=/var/lib/arvados
@@ -504,14 +541,23 @@ setcap "cap_sys_admin+pei cap_sys_chroot+pei" /var/lib/arvados/bin/nsenter
}
}
+ var njsArch string
+ switch runtime.GOARCH {
+ case "amd64":
+ njsArch = "x64"
+ default:
+ njsArch = runtime.GOARCH
+ }
+
if !prod {
- if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
- logger.Print("nodejs " + nodejsversion + " already installed")
+ if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == "v"+inst.NodejsVersion+"\n" {
+ logger.Print("nodejs " + inst.NodejsVersion + " already installed")
} else {
err = inst.runBash(`
-NJS=`+nodejsversion+`
-wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
-ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
+NJS=v`+inst.NodejsVersion+`
+rm -rf /var/lib/arvados/node-*-linux-`+njsArch+`
+wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-`+njsArch+`.tar.xz | sudo tar -C /var/lib/arvados -xJf -
+ln -sfv /var/lib/arvados/node-${NJS}-linux-`+njsArch+`/bin/{node,npm} /usr/local/bin/
`, stdout, stderr)
if err != nil {
return 1
@@ -523,44 +569,12 @@ ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
} else {
err = inst.runBash(`
npm install -g yarn
-ln -sfv /var/lib/arvados/node-`+nodejsversion+`-linux-x64/bin/{yarn,yarnpkg} /usr/local/bin/
+ln -sfv /var/lib/arvados/node-v`+inst.NodejsVersion+`-linux-`+njsArch+`/bin/{yarn,yarnpkg} /usr/local/bin/
`, stdout, stderr)
if err != nil {
return 1
}
}
-
- if havewb2version, err := exec.Command("git", "--git-dir=/var/lib/arvados/arvados-workbench2/.git", "log", "-n1", "--format=%H").CombinedOutput(); err == nil && string(havewb2version) == workbench2version+"\n" {
- logger.Print("workbench2 repo is already at " + workbench2version)
- } else {
- err = inst.runBash(`
-V=`+workbench2version+`
-cd /var/lib/arvados
-if [[ ! -e arvados-workbench2 ]]; then
- git clone https://git.arvados.org/arvados-workbench2.git
- cd arvados-workbench2
- git checkout $V
-else
- cd arvados-workbench2
- if ! git checkout $V; then
- git fetch
- git checkout yarn.lock
- git checkout $V
- fi
-fi
-rm -rf build
-`, stdout, stderr)
- if err != nil {
- return 1
- }
- }
-
- if err = inst.runBash(`
-cd /var/lib/arvados/arvados-workbench2
-yarn install
-`, stdout, stderr); err != nil {
- return 1
- }
}
if prod || pkg {
@@ -570,7 +584,16 @@ yarn install
"cmd/arvados-server",
} {
fmt.Fprintf(stderr, "building %s...\n", srcdir)
- cmd := exec.Command("go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+" -X main.version="+inst.PackageVersion+" -s -w")
+ // -buildvcs=false here avoids a fatal "error
+ // obtaining VCS status" when git refuses to
+ // run (for example) as root in a docker
+ // container using a non-root-owned git tree
+ // mounted from the host -- as in
+ // "arvados-package build".
+ cmd := exec.Command("go", "install", "-buildvcs=false",
+ "-ldflags", "-s -w"+
+ " -X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+
+ " -X git.arvados.org/arvados.git/lib/cmd.commit="+inst.Commit)
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, "GOBIN=/var/lib/arvados/bin")
cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
@@ -605,94 +628,103 @@ v=/var/lib/arvados/lib/python
tmp=/var/lib/arvados/tmp/python
python3 -m venv "$v"
. "$v/bin/activate"
-pip3 install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
+pip3 install --no-cache-dir 'setuptools>=68' 'pip>=20'
export ARVADOS_BUILDING_VERSION="`+inst.PackageVersion+`"
for src in "`+inst.SourcePath+`/sdk/python" "`+inst.SourcePath+`/services/fuse"; do
rsync -a --delete-after "$src/" "$tmp/"
- cd "$tmp"
- python3 setup.py install
- cd ..
+ env -C "$tmp" python3 setup.py build
+ pip3 install "$tmp"
rm -rf "$tmp"
done
`, stdout, stderr); err != nil {
return 1
}
- // Install Rails apps to /var/lib/arvados/{railsapi,workbench1}/
- for dstdir, srcdir := range map[string]string{
- "railsapi": "services/api",
- "workbench1": "apps/workbench",
+ // Install RailsAPI to /var/lib/arvados/railsapi/
+ fmt.Fprintln(stderr, "building railsapi...")
+ cmd = exec.Command("rsync",
+ "-a", "--no-owner", "--no-group", "--delete-after", "--delete-excluded",
+ "--exclude", "/coverage",
+ "--exclude", "/log",
+ "--exclude", "/node_modules",
+ "--exclude", "/tmp",
+ "--exclude", "/public/assets",
+ "--exclude", "/vendor",
+ "--exclude", "/config/environments",
+ "./", "/var/lib/arvados/railsapi/")
+ cmd.Dir = filepath.Join(inst.SourcePath, "services", "api")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+ for _, cmdline := range [][]string{
+ {"mkdir", "-p", "log", "public/assets", "tmp", "vendor", ".bundle", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger"},
+ {"touch", "log/production.log"},
+ {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger", "log", "tmp", "vendor", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:" + inst.BundlerVersion},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "deployment", "true"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "path", "/var/www/.gem"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "without", "development test diagnostics performance"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--jobs", fmt.Sprintf("%d", runtime.NumCPU())},
+
+ {"chown", "www-data:www-data", ".", "public/assets"},
+ // {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "system", "true"},
+ {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "npm:install"},
+ {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "assets:precompile"},
+ {"chown", "root:root", "."},
+ {"chown", "-R", "root:root", "public/assets", "vendor"},
+
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "build-native-support"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "install-standalone-runtime"},
} {
- fmt.Fprintf(stderr, "building %s...\n", srcdir)
- cmd := exec.Command("rsync",
- "-a", "--no-owner", "--no-group", "--delete-after", "--delete-excluded",
- "--exclude", "/coverage",
- "--exclude", "/log",
- "--exclude", "/node_modules",
- "--exclude", "/tmp",
- "--exclude", "/public/assets",
- "--exclude", "/vendor",
- "--exclude", "/config/environments",
- "./", "/var/lib/arvados/"+dstdir+"/")
- cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- err = cmd.Run()
- if err != nil {
- return 1
- }
- for _, cmdline := range [][]string{
- {"mkdir", "-p", "log", "public/assets", "tmp", "vendor", ".bundle", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger"},
- {"touch", "log/production.log"},
- {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger", "log", "tmp", "vendor", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:" + bundlerversion},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "deployment", "true"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "path", "/var/www/.gem"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "without", "development test diagnostics performance"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--jobs", fmt.Sprintf("%d", runtime.NumCPU())},
-
- {"chown", "www-data:www-data", ".", "public/assets"},
- // {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "system", "true"},
- {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "npm:install"},
- {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "assets:precompile"},
- {"chown", "root:root", "."},
- {"chown", "-R", "root:root", "public/assets", "vendor"},
-
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "build-native-support"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "install-standalone-runtime"},
- } {
- if cmdline[len(cmdline)-2] == "rake" && dstdir != "workbench1" {
- continue
- }
- cmd = exec.Command(cmdline[0], cmdline[1:]...)
- cmd.Dir = "/var/lib/arvados/" + dstdir
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- fmt.Fprintf(stderr, "... %s\n", cmd.Args)
- err = cmd.Run()
- if err != nil {
- return 1
- }
+ if cmdline[len(cmdline)-2] == "rake" {
+ continue
}
- cmd = exec.Command("sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "validate-install")
- cmd.Dir = "/var/lib/arvados/" + dstdir
+ cmd = exec.Command(cmdline[0], cmdline[1:]...)
+ cmd.Dir = "/var/lib/arvados/railsapi"
cmd.Stdout = stdout
cmd.Stderr = stderr
+ fmt.Fprintf(stderr, "... %s\n", cmd.Args)
err = cmd.Run()
- if err != nil && !strings.Contains(err.Error(), "exit status 2") {
- // Exit code 2 indicates there were warnings (like
- // "other passenger installations have been detected",
- // which we can't expect to avoid) but no errors.
- // Other non-zero exit codes (1, 9) indicate errors.
+ if err != nil {
return 1
}
}
+ cmd = exec.Command("sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "validate-install")
+ cmd.Dir = "/var/lib/arvados/railsapi"
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil && !strings.Contains(err.Error(), "exit status 2") {
+ // Exit code 2 indicates there were warnings (like
+ // "other passenger installations have been detected",
+ // which we can't expect to avoid) but no errors.
+ // Other non-zero exit codes (1, 9) indicate errors.
+ return 1
+ }
- // Install workbench2 app to /var/lib/arvados/workbench2/
+ // Install workbench2 app to
+ // /var/lib/arvados/workbench2/.
+ //
+ // We copy the source tree from the (possibly
+ // readonly) source tree into a temp dir because `yarn
+ // build` writes to {source-tree}/build/. When we
+ // upgrade to react-scripts >= 4.0.2 we may be able to
+ // build from the source dir and write directly to the
+ // final destination (using
+ // YARN_INSTALL_STATE_PATH=/dev/null
+ // BUILD_PATH=/var/lib/arvados/workbench2) instead of
+ // using two rsync steps here.
if err = inst.runBash(`
-cd /var/lib/arvados/arvados-workbench2
-VERSION="`+inst.PackageVersion+`" BUILD_NUMBER=1 GIT_COMMIT="`+workbench2version[:9]+`" yarn build
-rsync -a --delete-after build/ /var/lib/arvados/workbench2/
+src="`+inst.SourcePath+`/services/workbench2"
+tmp=/var/lib/arvados/tmp/workbench2
+trap "rm -r ${tmp}" ERR EXIT
+dst=/var/lib/arvados/workbench2
+rsync -a --delete-after "$src/" "$tmp/"
+env -C "$tmp" VERSION="`+inst.PackageVersion+`" BUILD_NUMBER=1 GIT_COMMIT="`+inst.Commit[:9]+`" yarn build
+rsync -a --delete-after "$tmp/build/" "$dst/"
`, stdout, stderr); err != nil {
return 1
}
@@ -771,7 +803,7 @@ rsync -a --delete-after build/ /var/lib/arvados/workbench2/
type osversion struct {
Debian bool
Ubuntu bool
- Centos bool
+ RedHat bool
Major int
}
@@ -809,10 +841,24 @@ func identifyOS() (osversion, error) {
osv.Ubuntu = true
case "debian":
osv.Debian = true
- case "centos":
- osv.Centos = true
default:
- return osv, fmt.Errorf("unsupported ID in /etc/os-release: %q", kv["ID"])
+ idLikeMatched := false
+ for _, idLike := range strings.Split(kv["ID_LIKE"], " ") {
+ switch idLike {
+ case "debian":
+ osv.Debian = true
+ idLikeMatched = true
+ case "rhel":
+ osv.RedHat = true
+ idLikeMatched = true
+ }
+ if idLikeMatched {
+ break
+ }
+ }
+ if !idLikeMatched {
+ return osv, fmt.Errorf("no supported ID found in /etc/os-release")
+ }
}
vstr := kv["VERSION_ID"]
if i := strings.Index(vstr, "."); i > 0 {
@@ -873,7 +919,7 @@ func prodpkgs(osv osversion) []string {
return append(pkgs,
"mime-support", // keep-web
)
- } else if osv.Centos {
+ } else if osv.RedHat {
return append(pkgs,
"fuse-libs", // services/fuse
"mailcap", // keep-web
diff --git a/lib/install/deps_go_version_test.go b/lib/install/deps_go_version_test.go
index 1a69b6e617..a434c834d1 100644
--- a/lib/install/deps_go_version_test.go
+++ b/lib/install/deps_go_version_test.go
@@ -20,13 +20,11 @@ var _ = check.Suite(&Suite{})
type Suite struct{}
-/*
- TestExtractGoVersion tests the grep/awk command used in
- tools/arvbox/bin/arvbox to extract the version of Go to install for
- bootstrapping `arvados-server`.
-
- If this test is changed, the arvbox code will also need to be updated.
-*/
+// TestExtractGoVersion tests the grep/awk command used in
+// tools/arvbox/bin/arvbox to extract the version of Go to install for
+// bootstrapping `arvados-server`.
+//
+// If this test is changed, the arvbox code will also need to be updated.
func (*Suite) TestExtractGoVersion(c *check.C) {
script := `
sourcepath="$(realpath ../..)"
diff --git a/lib/install/deps_test.go b/lib/install/deps_test.go
index 993e779e5b..b9274b425c 100644
--- a/lib/install/deps_test.go
+++ b/lib/install/deps_test.go
@@ -13,19 +13,10 @@ package install
import (
"os"
- "testing"
"gopkg.in/check.v1"
)
-func Test(t *testing.T) {
- check.TestingT(t)
-}
-
-var _ = check.Suite(&Suite{})
-
-type Suite struct{}
-
func (*Suite) TestInstallDeps(c *check.C) {
tmp := c.MkDir()
script := `
@@ -36,13 +27,14 @@ sourcepath="$(realpath ../..)"
docker run -i --rm --workdir /arvados \
-v ${tmp}/arvados-server:/arvados-server:ro \
-v ${sourcepath}:/arvados:ro \
- -v /arvados/apps/workbench/.bundle \
-v /arvados/services/api/.bundle \
-v /arvados/services/api/tmp \
--env http_proxy \
--env https_proxy \
- debian:10 \
- bash -c "/arvados-server install -type test && /arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -shutdown -timeout 9m"
+ debian:11 \
+ bash -c "/arvados-server install -type test &&
+ git config --global --add safe.directory /arvados &&
+ /arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -shutdown -timeout 9m"
`
- c.Check(runBash(script, os.Stdout, os.Stderr), check.IsNil)
+ c.Check((&installCommand{}).runBash(script, os.Stdout, os.Stderr), check.IsNil)
}
diff --git a/lib/install/example_from_scratch.sh b/lib/install/example_from_scratch.sh
index 03d9b7f63b..182e1bfeb5 100644
--- a/lib/install/example_from_scratch.sh
+++ b/lib/install/example_from_scratch.sh
@@ -1,17 +1,19 @@
#!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
set -e -o pipefail
-# Starting with a base debian buster system, like "docker run -it
-# debian:10"...
+# Starting with a base debian bullseye system, like "docker run -it
+# debian:11"...
apt update
apt upgrade
apt install --no-install-recommends build-essential ca-certificates git golang
git clone https://git.arvados.org/arvados.git
-cd arvados
-[[ -e lib/install ]] || git checkout origin/16053-install-deps
-cd cmd/arvados-server
+cd arvados/cmd/arvados-server
go run ./cmd/arvados-server install -type test
-pg_isready || pg_ctlcluster 11 main start # only needed if there's no init process (as in docker)
+pg_isready || pg_ctlcluster 13 main start # only needed if there's no init process (as in docker)
build/run-tests.sh
diff --git a/lib/install/init.go b/lib/install/init.go
index c362c32b87..d9b74f6a06 100644
--- a/lib/install/init.go
+++ b/lib/install/init.go
@@ -301,8 +301,6 @@ func (initcmd *initCommand) RunCommand(prog string, args []string, stdin io.Read
DriverParameters:
Root: /var/lib/arvados/keep
Replication: 2
- Workbench:
- SecretKeyBase: {{printf "%q" ( .RandomHex 50 )}}
{{if .LoginPAM}}
Login:
PAM:
diff --git a/lib/lsf/dispatch.go b/lib/lsf/dispatch.go
index e2348337e6..897e5803f2 100644
--- a/lib/lsf/dispatch.go
+++ b/lib/lsf/dispatch.go
@@ -18,6 +18,8 @@ import (
"time"
"git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/lib/dispatchcloud"
"git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -58,6 +60,7 @@ type dispatcher struct {
Registry *prometheus.Registry
logger logrus.FieldLogger
+ dbConnector ctrlctx.DBConnector
lsfcli lsfcli
lsfqueue lsfqueue
arvDispatcher *dispatch.Dispatcher
@@ -73,7 +76,9 @@ type dispatcher struct {
func (disp *dispatcher) Start() {
disp.initOnce.Do(func() {
disp.init()
+ dblock.Dispatch.Lock(context.Background(), disp.dbConnector.GetDB)
go func() {
+ defer dblock.Dispatch.Unlock()
disp.checkLsfQueueForOrphans()
err := disp.arvDispatcher.Run(disp.Context)
if err != nil {
@@ -125,6 +130,7 @@ func (disp *dispatcher) init() {
lsfcli: &disp.lsfcli,
}
disp.ArvClient.AuthToken = disp.AuthToken
+ disp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.Cluster.PostgreSQL}
disp.stop = make(chan struct{}, 1)
disp.stopped = make(chan struct{})
@@ -170,6 +176,19 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
if ctr.State != dispatch.Locked {
// already started by prior invocation
} else if _, ok := disp.lsfqueue.Lookup(ctr.UUID); !ok {
+ if _, err := dispatchcloud.ChooseInstanceType(disp.Cluster, &ctr); errors.As(err, &dispatchcloud.ConstraintsNotSatisfiableError{}) {
+ err := disp.arvDispatcher.Arv.Update("containers", ctr.UUID, arvadosclient.Dict{
+ "container": map[string]interface{}{
+ "runtime_status": map[string]string{
+ "error": err.Error(),
+ },
+ },
+ }, nil)
+ if err != nil {
+ return fmt.Errorf("error setting runtime_status on %s: %s", ctr.UUID, err)
+ }
+ return disp.arvDispatcher.UpdateState(ctr.UUID, dispatch.Cancelled)
+ }
disp.logger.Printf("Submitting container %s to LSF", ctr.UUID)
cmd := []string{disp.Cluster.Containers.CrunchRunCommand}
cmd = append(cmd, "--runtime-engine="+disp.Cluster.Containers.RuntimeEngine)
@@ -184,9 +203,8 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
defer disp.logger.Printf("Done monitoring container %s", ctr.UUID)
go func(uuid string) {
- cancelled := false
for ctx.Err() == nil {
- qent, ok := disp.lsfqueue.Lookup(uuid)
+ _, ok := disp.lsfqueue.Lookup(uuid)
if !ok {
// If the container disappears from
// the lsf queue, there is no point in
@@ -196,25 +214,6 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
cancel()
return
}
- if !cancelled && qent.Stat == "PEND" && strings.Contains(qent.PendReason, "There are no suitable hosts for the job") {
- disp.logger.Printf("container %s: %s", uuid, qent.PendReason)
- err := disp.arvDispatcher.Arv.Update("containers", uuid, arvadosclient.Dict{
- "container": map[string]interface{}{
- "runtime_status": map[string]string{
- "error": qent.PendReason,
- },
- },
- }, nil)
- if err != nil {
- disp.logger.Printf("error setting runtime_status on %s: %s", uuid, err)
- continue // retry
- }
- err = disp.arvDispatcher.UpdateState(uuid, dispatch.Cancelled)
- if err != nil {
- continue // retry (UpdateState() already logged the error)
- }
- cancelled = true
- }
}
}(ctr.UUID)
@@ -307,6 +306,15 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
container.RuntimeConstraints.KeepCacheRAM+
int64(disp.Cluster.Containers.ReserveExtraRAM)) / 1048576))
+ maxruntime := time.Duration(container.SchedulingParameters.MaxRunTime) * time.Second
+ if maxruntime == 0 {
+ maxruntime = disp.Cluster.Containers.LSF.MaxRunTimeDefault.Duration()
+ }
+ if maxruntime > 0 {
+ maxruntime += disp.Cluster.Containers.LSF.MaxRunTimeOverhead.Duration()
+ }
+ maxrunminutes := int64(math.Ceil(float64(maxruntime.Seconds()) / 60))
+
repl := map[string]string{
"%%": "%",
"%C": fmt.Sprintf("%d", vcpus),
@@ -314,6 +322,7 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
"%T": fmt.Sprintf("%d", tmp),
"%U": container.UUID,
"%G": fmt.Sprintf("%d", container.RuntimeConstraints.CUDA.DeviceCount),
+ "%W": fmt.Sprintf("%d", maxrunminutes),
}
re := regexp.MustCompile(`%.`)
@@ -322,7 +331,16 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
if container.RuntimeConstraints.CUDA.DeviceCount > 0 {
argumentTemplate = append(argumentTemplate, disp.Cluster.Containers.LSF.BsubCUDAArguments...)
}
- for _, a := range argumentTemplate {
+ for idx, a := range argumentTemplate {
+ if idx > 0 && (argumentTemplate[idx-1] == "-W" || argumentTemplate[idx-1] == "-We") && a == "%W" && maxrunminutes == 0 {
+ // LSF docs don't specify an argument to "-W"
+ // or "-We" that indicates "unknown", so
+ // instead we drop the "-W %W" part of the
+ // command line entirely when max runtime is
+ // unknown.
+ args = args[:len(args)-1]
+ continue
+ }
args = append(args, re.ReplaceAllStringFunc(a, func(s string) string {
subst := repl[s]
if len(subst) == 0 {
diff --git a/lib/lsf/dispatch_test.go b/lib/lsf/dispatch_test.go
index a99983f34a..e1e0bcae31 100644
--- a/lib/lsf/dispatch_test.go
+++ b/lib/lsf/dispatch_test.go
@@ -32,7 +32,9 @@ var _ = check.Suite(&suite{})
type suite struct {
disp *dispatcher
crTooBig arvados.ContainerRequest
+ crPending arvados.ContainerRequest
crCUDARequest arvados.ContainerRequest
+ crMaxRunTime arvados.ContainerRequest
}
func (s *suite) TearDownTest(c *check.C) {
@@ -44,8 +46,16 @@ func (s *suite) SetUpTest(c *check.C) {
c.Assert(err, check.IsNil)
cluster, err := cfg.GetCluster("")
c.Assert(err, check.IsNil)
+ cluster.Containers.ReserveExtraRAM = 256 << 20
cluster.Containers.CloudVMs.PollInterval = arvados.Duration(time.Second / 4)
cluster.Containers.MinRetryPeriod = arvados.Duration(time.Second / 4)
+ cluster.InstanceTypes = arvados.InstanceTypeMap{
+ "biggest_available_node": arvados.InstanceType{
+ RAM: 100 << 30, // 100 GiB
+ VCPUs: 4,
+ IncludedScratch: 100 << 30,
+ Scratch: 100 << 30,
+ }}
s.disp = newHandler(context.Background(), cluster, arvadostest.Dispatch1Token, prometheus.NewRegistry()).(*dispatcher)
s.disp.lsfcli.stubCommand = func(string, ...string) *exec.Cmd {
return exec.Command("bash", "-c", "echo >&2 unimplemented stub; false")
@@ -67,6 +77,24 @@ func (s *suite) SetUpTest(c *check.C) {
})
c.Assert(err, check.IsNil)
+ err = arvados.NewClientFromEnv().RequestAndDecode(&s.crPending, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
+ "container_request": map[string]interface{}{
+ "runtime_constraints": arvados.RuntimeConstraints{
+ RAM: 100000000,
+ VCPUs: 2,
+ KeepCacheDisk: 8 << 30,
+ },
+ "container_image": arvadostest.DockerImage112PDH,
+ "command": []string{"sleep", "1"},
+ "mounts": map[string]arvados.Mount{"/mnt/out": {Kind: "tmp", Capacity: 1000}},
+ "output_path": "/mnt/out",
+ "state": arvados.ContainerRequestStateCommitted,
+ "priority": 1,
+ "container_count_max": 1,
+ },
+ })
+ c.Assert(err, check.IsNil)
+
err = arvados.NewClientFromEnv().RequestAndDecode(&s.crCUDARequest, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
"container_request": map[string]interface{}{
"runtime_constraints": arvados.RuntimeConstraints{
@@ -89,6 +117,25 @@ func (s *suite) SetUpTest(c *check.C) {
})
c.Assert(err, check.IsNil)
+ err = arvados.NewClientFromEnv().RequestAndDecode(&s.crMaxRunTime, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
+ "container_request": map[string]interface{}{
+ "runtime_constraints": arvados.RuntimeConstraints{
+ RAM: 1000000,
+ VCPUs: 1,
+ },
+ "scheduling_parameters": arvados.SchedulingParameters{
+ MaxRunTime: 124,
+ },
+ "container_image": arvadostest.DockerImage112PDH,
+ "command": []string{"sleep", "123"},
+ "mounts": map[string]arvados.Mount{"/mnt/out": {Kind: "tmp", Capacity: 1000}},
+ "output_path": "/mnt/out",
+ "state": arvados.ContainerRequestStateCommitted,
+ "priority": 1,
+ "container_count_max": 1,
+ },
+ })
+ c.Assert(err, check.IsNil)
}
type lsfstub struct {
@@ -114,12 +161,7 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
}
switch prog {
case "bsub":
- defaultArgs := s.disp.Cluster.Containers.LSF.BsubArgumentsList
- if args[5] == s.crCUDARequest.ContainerUUID {
- c.Assert(len(args), check.Equals, len(defaultArgs)+len(s.disp.Cluster.Containers.LSF.BsubCUDAArguments))
- } else {
- c.Assert(len(args), check.Equals, len(defaultArgs))
- }
+ c.Assert(len(args) > 5, check.Equals, true)
// %%J must have been rewritten to %J
c.Check(args[1], check.Equals, "/tmp/crunch-run.%J.out")
args = args[4:]
@@ -150,15 +192,15 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
fakejobq[nextjobid] = args[1]
nextjobid++
mtx.Unlock()
- case s.crTooBig.ContainerUUID:
+ case s.crPending.ContainerUUID:
c.Check(args, check.DeepEquals, []string{
- "-J", s.crTooBig.ContainerUUID,
- "-n", "1",
- "-D", "954187MB",
- "-R", "rusage[mem=954187MB:tmp=256MB] span[hosts=1]",
- "-R", "select[mem>=954187MB]",
- "-R", "select[tmp>=256MB]",
- "-R", "select[ncpus>=1]"})
+ "-J", s.crPending.ContainerUUID,
+ "-n", "2",
+ "-D", "352MB",
+ "-R", "rusage[mem=352MB:tmp=8448MB] span[hosts=1]",
+ "-R", "select[mem>=352MB]",
+ "-R", "select[tmp>=8448MB]",
+ "-R", "select[ncpus>=2]"})
mtx.Lock()
fakejobq[nextjobid] = args[1]
nextjobid++
@@ -177,6 +219,21 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
fakejobq[nextjobid] = args[1]
nextjobid++
mtx.Unlock()
+ case s.crMaxRunTime.ContainerUUID:
+ c.Check(args, check.DeepEquals, []string{
+ "-J", s.crMaxRunTime.ContainerUUID,
+ "-n", "1",
+ "-D", "257MB",
+ "-R", "rusage[mem=257MB:tmp=2304MB] span[hosts=1]",
+ "-R", "select[mem>=257MB]",
+ "-R", "select[tmp>=2304MB]",
+ "-R", "select[ncpus>=1]",
+ "-We", "8", // 124s + 5m overhead + roundup = 8m
+ })
+ mtx.Lock()
+ fakejobq[nextjobid] = args[1]
+ nextjobid++
+ mtx.Unlock()
default:
c.Errorf("unexpected uuid passed to bsub: args %q", args)
return exec.Command("false")
@@ -187,7 +244,7 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
var records []map[string]interface{}
for jobid, uuid := range fakejobq {
stat, reason := "RUN", ""
- if uuid == s.crTooBig.ContainerUUID {
+ if uuid == s.crPending.ContainerUUID {
// The real bjobs output includes a trailing ';' here:
stat, reason = "PEND", "There are no suitable hosts for the job;"
}
@@ -242,23 +299,28 @@ func (s *suite) TestSubmit(c *check.C) {
c.Error("timed out")
break
}
+ // "crTooBig" should never be submitted to lsf because
+ // it is bigger than any configured instance type
+ if ent, ok := s.disp.lsfqueue.Lookup(s.crTooBig.ContainerUUID); ok {
+ c.Errorf("Lookup(crTooBig) == true, ent = %#v", ent)
+ break
+ }
// "queuedcontainer" should be running
if _, ok := s.disp.lsfqueue.Lookup(arvadostest.QueuedContainerUUID); !ok {
c.Log("Lookup(queuedcontainer) == false")
continue
}
+ // "crPending" should be pending
+ if ent, ok := s.disp.lsfqueue.Lookup(s.crPending.ContainerUUID); !ok {
+ c.Logf("Lookup(crPending) == false, ent = %#v", ent)
+ continue
+ }
// "lockedcontainer" should be cancelled because it
// has priority 0 (no matching container requests)
if ent, ok := s.disp.lsfqueue.Lookup(arvadostest.LockedContainerUUID); ok {
c.Logf("Lookup(lockedcontainer) == true, ent = %#v", ent)
continue
}
- // "crTooBig" should be cancelled because lsf stub
- // reports there is no suitable instance type
- if ent, ok := s.disp.lsfqueue.Lookup(s.crTooBig.ContainerUUID); ok {
- c.Logf("Lookup(crTooBig) == true, ent = %#v", ent)
- continue
- }
var ctr arvados.Container
if err := s.disp.arvDispatcher.Arv.Get("containers", arvadostest.LockedContainerUUID, nil, &ctr); err != nil {
c.Logf("error getting container state for %s: %s", arvadostest.LockedContainerUUID, err)
@@ -275,7 +337,7 @@ func (s *suite) TestSubmit(c *check.C) {
c.Logf("container %s is not in the LSF queue but its arvados record has not been updated to state==Cancelled (state is %q)", s.crTooBig.ContainerUUID, ctr.State)
continue
} else {
- c.Check(ctr.RuntimeStatus["error"], check.Equals, "There are no suitable hosts for the job;")
+ c.Check(ctr.RuntimeStatus["error"], check.Equals, "constraints not satisfiable by any configured instance type")
}
c.Log("reached desired state")
break
diff --git a/lib/mount/command.go b/lib/mount/command.go
index f88d977c4c..eab9fd944c 100644
--- a/lib/mount/command.go
+++ b/lib/mount/command.go
@@ -17,8 +17,11 @@ import (
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/arvados/cgofuse/fuse"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
)
var Command = &mountCommand{}
@@ -27,7 +30,7 @@ type mountCommand struct {
// ready, if non-nil, will be closed when the mount is
// initialized. If ready is non-nil, it RunCommand() should
// not be called more than once, or when ready is already
- // closed.
+ // closed. Only intended for testing.
ready chan struct{}
// It is safe to call Unmount only after ready has been
// closed.
@@ -39,19 +42,32 @@ type mountCommand struct {
// The "-d" fuse option (and perhaps other features) ignores the
// stderr argument and prints to os.Stderr instead.
func (c *mountCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
- logger := log.New(stderr, prog+" ", 0)
+ logger := ctxlog.New(stderr, "text", "info")
+ defer logger.Debug("exiting")
+
flags := flag.NewFlagSet(prog, flag.ContinueOnError)
ro := flags.Bool("ro", false, "read-only")
experimental := flags.Bool("experimental", false, "acknowledge this is an experimental command, and should not be used in production (required)")
- blockCache := flags.Int("block-cache", 4, "read cache size (number of 64MiB blocks)")
+ cacheSizeStr := flags.String("cache-size", "0", "cache size as percent of home filesystem size (\"5%\") or size (\"10GiB\") or 0 for automatic")
+ logLevel := flags.String("log-level", "info", "logging level (debug, info, ...)")
+ debug := flags.Bool("debug", false, "alias for -log-level=debug")
pprof := flags.String("pprof", "", "serve Go profile data at `[addr]:port`")
if ok, code := cmd.ParseFlags(flags, prog, args, "[FUSE mount options]", stderr); !ok {
return code
}
if !*experimental {
- logger.Printf("error: experimental command %q used without --experimental flag", prog)
+ logger.Errorf("experimental command %q used without --experimental flag", prog)
+ return 2
+ }
+ lvl, err := logrus.ParseLevel(*logLevel)
+ if err != nil {
+ logger.WithError(err).Error("invalid argument for -log-level flag")
return 2
}
+ if *debug {
+ lvl = logrus.DebugLevel
+ }
+ logger.SetLevel(lvl)
if *pprof != "" {
go func() {
log.Println(http.ListenAndServe(*pprof, nil))
@@ -59,26 +75,32 @@ func (c *mountCommand) RunCommand(prog string, args []string, stdin io.Reader, s
}
client := arvados.NewClientFromEnv()
+ if err := yaml.Unmarshal([]byte(*cacheSizeStr), &client.DiskCacheSize); err != nil {
+ logger.Errorf("error parsing -cache-size argument: %s", err)
+ return 2
+ }
ac, err := arvadosclient.New(client)
if err != nil {
- logger.Print(err)
+ logger.Error(err)
return 1
}
kc, err := keepclient.MakeKeepClient(ac)
if err != nil {
- logger.Print(err)
+ logger.Error(err)
return 1
}
- kc.BlockCache = &keepclient.BlockCache{MaxBlocks: *blockCache}
host := fuse.NewFileSystemHost(&keepFS{
Client: client,
KeepClient: kc,
ReadOnly: *ro,
Uid: os.Getuid(),
Gid: os.Getgid(),
+ Logger: logger,
ready: c.ready,
})
c.Unmount = host.Unmount
+
+ logger.WithField("mountargs", flags.Args()).Debug("mounting")
ok := host.Mount("", flags.Args())
if !ok {
return 1
diff --git a/lib/mount/fs.go b/lib/mount/fs.go
index 3c2e628d01..dece44d25d 100644
--- a/lib/mount/fs.go
+++ b/lib/mount/fs.go
@@ -15,6 +15,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/arvados/cgofuse/fuse"
+ "github.com/sirupsen/logrus"
)
// sharedFile wraps arvados.File with a sync.Mutex, so fuse can safely
@@ -33,6 +34,7 @@ type keepFS struct {
ReadOnly bool
Uid int
Gid int
+ Logger logrus.FieldLogger
root arvados.CustomFileSystem
open map[uint64]*sharedFile
@@ -79,6 +81,7 @@ func (fs *keepFS) Init() {
func (fs *keepFS) Create(path string, flags int, mode uint32) (errc int, fh uint64) {
defer fs.debugPanics()
+ fs.debugOp("Create", path)
if fs.ReadOnly {
return -fuse.EROFS, invalidFH
}
@@ -93,6 +96,7 @@ func (fs *keepFS) Create(path string, flags int, mode uint32) (errc int, fh uint
func (fs *keepFS) Open(path string, flags int) (errc int, fh uint64) {
defer fs.debugPanics()
+ fs.debugOp("Open", path)
if fs.ReadOnly && flags&(os.O_RDWR|os.O_WRONLY|os.O_CREATE) != 0 {
return -fuse.EROFS, invalidFH
}
@@ -110,21 +114,30 @@ func (fs *keepFS) Open(path string, flags int) (errc int, fh uint64) {
func (fs *keepFS) Utimens(path string, tmsp []fuse.Timespec) int {
defer fs.debugPanics()
+ fs.debugOp("Utimens", path)
if fs.ReadOnly {
return -fuse.EROFS
}
f, err := fs.root.OpenFile(path, 0, 0)
if err != nil {
- return fs.errCode(err)
+ return fs.errCode("Utimens", path, err)
}
f.Close()
return 0
}
-func (fs *keepFS) errCode(err error) int {
+func (fs *keepFS) errCode(op, path string, err error) (errc int) {
if err == nil {
return 0
}
+ defer func() {
+ fs.Logger.WithFields(logrus.Fields{
+ "op": op,
+ "path": path,
+ "errno": errc,
+ "error": err,
+ }).Debug("fuse call returned error")
+ }()
if errors.Is(err, os.ErrNotExist) {
return -fuse.ENOENT
}
@@ -145,12 +158,13 @@ func (fs *keepFS) errCode(err error) int {
func (fs *keepFS) Mkdir(path string, mode uint32) int {
defer fs.debugPanics()
+ fs.debugOp("Mkdir", path)
if fs.ReadOnly {
return -fuse.EROFS
}
f, err := fs.root.OpenFile(path, os.O_CREATE|os.O_EXCL, os.FileMode(mode)|os.ModeDir)
if err != nil {
- return fs.errCode(err)
+ return fs.errCode("Mkdir", path, err)
}
f.Close()
return 0
@@ -158,11 +172,12 @@ func (fs *keepFS) Mkdir(path string, mode uint32) int {
func (fs *keepFS) Opendir(path string) (errc int, fh uint64) {
defer fs.debugPanics()
+ fs.debugOp("Opendir", path)
f, err := fs.root.OpenFile(path, 0, 0)
if err != nil {
- return fs.errCode(err), invalidFH
+ return fs.errCode("Opendir", path, err), invalidFH
} else if fi, err := f.Stat(); err != nil {
- return fs.errCode(err), invalidFH
+ return fs.errCode("Opendir", path, err), invalidFH
} else if !fi.IsDir() {
f.Close()
return -fuse.ENOTDIR, invalidFH
@@ -172,16 +187,19 @@ func (fs *keepFS) Opendir(path string) (errc int, fh uint64) {
func (fs *keepFS) Releasedir(path string, fh uint64) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Releasedir", path)
return fs.Release(path, fh)
}
func (fs *keepFS) Rmdir(path string) int {
defer fs.debugPanics()
- return fs.errCode(fs.root.Remove(path))
+ fs.debugOp("Rmdir", path)
+ return fs.errCode("Rmdir", path, fs.root.Remove(path))
}
func (fs *keepFS) Release(path string, fh uint64) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Release", path)
fs.Lock()
defer fs.Unlock()
defer delete(fs.open, fh)
@@ -196,22 +214,25 @@ func (fs *keepFS) Release(path string, fh uint64) (errc int) {
func (fs *keepFS) Rename(oldname, newname string) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Rename", oldname+" -> "+newname)
if fs.ReadOnly {
return -fuse.EROFS
}
- return fs.errCode(fs.root.Rename(oldname, newname))
+ return fs.errCode("Rename", oldname+" -> "+newname, fs.root.Rename(oldname, newname))
}
func (fs *keepFS) Unlink(path string) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Unlink", path)
if fs.ReadOnly {
return -fuse.EROFS
}
- return fs.errCode(fs.root.Remove(path))
+ return fs.errCode("Unlink", path, fs.root.Remove(path))
}
func (fs *keepFS) Truncate(path string, size int64, fh uint64) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Truncate", path)
if fs.ReadOnly {
return -fuse.EROFS
}
@@ -219,20 +240,21 @@ func (fs *keepFS) Truncate(path string, size int64, fh uint64) (errc int) {
// Sometimes fh is a valid filehandle and we don't need to
// waste a name lookup.
if f := fs.lookupFH(fh); f != nil {
- return fs.errCode(f.Truncate(size))
+ return fs.errCode("Truncate", path, f.Truncate(size))
}
// Other times, fh is invalid and we need to lookup path.
f, err := fs.root.OpenFile(path, os.O_RDWR, 0)
if err != nil {
- return fs.errCode(err)
+ return fs.errCode("Truncate", path, err)
}
defer f.Close()
- return fs.errCode(f.Truncate(size))
+ return fs.errCode("Truncate", path, f.Truncate(size))
}
func (fs *keepFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Getattr", path)
var fi os.FileInfo
var err error
if f := fs.lookupFH(fh); f != nil {
@@ -243,18 +265,20 @@ func (fs *keepFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int)
fi, err = fs.root.Stat(path)
}
if err != nil {
- return fs.errCode(err)
+ return fs.errCode("Getattr", path, err)
}
fs.fillStat(stat, fi)
return 0
}
func (fs *keepFS) Chmod(path string, mode uint32) (errc int) {
+ defer fs.debugPanics()
+ fs.debugOp("Chmod", path)
if fs.ReadOnly {
return -fuse.EROFS
}
if fi, err := fs.root.Stat(path); err != nil {
- return fs.errCode(err)
+ return fs.errCode("Chmod", path, err)
} else if mode & ^uint32(fuse.S_IFREG|fuse.S_IFDIR|0777) != 0 {
// Refuse to set mode bits other than
// regfile/dir/perms
@@ -298,6 +322,7 @@ func (fs *keepFS) fillStat(stat *fuse.Stat_t, fi os.FileInfo) {
func (fs *keepFS) Write(path string, buf []byte, ofst int64, fh uint64) (n int) {
defer fs.debugPanics()
+ fs.debugOp("Write", path)
if fs.ReadOnly {
return -fuse.EROFS
}
@@ -308,18 +333,18 @@ func (fs *keepFS) Write(path string, buf []byte, ofst int64, fh uint64) (n int)
f.Lock()
defer f.Unlock()
if _, err := f.Seek(ofst, io.SeekStart); err != nil {
- return fs.errCode(err)
+ return fs.errCode("Write", path, err)
}
n, err := f.Write(buf)
if err != nil {
- log.Printf("error writing %q: %s", path, err)
- return fs.errCode(err)
+ return fs.errCode("Write", path, err)
}
return n
}
func (fs *keepFS) Read(path string, buf []byte, ofst int64, fh uint64) (n int) {
defer fs.debugPanics()
+ fs.debugOp("Read", path)
f := fs.lookupFH(fh)
if f == nil {
return -fuse.EBADF
@@ -327,7 +352,7 @@ func (fs *keepFS) Read(path string, buf []byte, ofst int64, fh uint64) (n int) {
f.Lock()
defer f.Unlock()
if _, err := f.Seek(ofst, io.SeekStart); err != nil {
- return fs.errCode(err)
+ return fs.errCode("Read", path, err)
}
n, err := f.Read(buf)
for err == nil && n < len(buf) {
@@ -341,8 +366,7 @@ func (fs *keepFS) Read(path string, buf []byte, ofst int64, fh uint64) (n int) {
n += done
}
if err != nil && err != io.EOF {
- log.Printf("error reading %q: %s", path, err)
- return fs.errCode(err)
+ return fs.errCode("Read", path, err)
}
return n
}
@@ -352,6 +376,7 @@ func (fs *keepFS) Readdir(path string,
ofst int64,
fh uint64) (errc int) {
defer fs.debugPanics()
+ fs.debugOp("Readdir", path)
f := fs.lookupFH(fh)
if f == nil {
return -fuse.EBADF
@@ -361,7 +386,7 @@ func (fs *keepFS) Readdir(path string,
var stat fuse.Stat_t
fis, err := f.Readdir(-1)
if err != nil {
- return fs.errCode(err)
+ return fs.errCode("Readdir", path, err)
}
for _, fi := range fis {
fs.fillStat(&stat, fi)
@@ -372,14 +397,16 @@ func (fs *keepFS) Readdir(path string,
func (fs *keepFS) Fsync(path string, datasync bool, fh uint64) int {
defer fs.debugPanics()
+ fs.debugOp("Fsync", path)
f := fs.lookupFH(fh)
if f == nil {
return -fuse.EBADF
}
- return fs.errCode(f.Sync())
+ return fs.errCode("Fsync", path, f.Sync())
}
func (fs *keepFS) Fsyncdir(path string, datasync bool, fh uint64) int {
+ fs.debugOp("Fsyncdir", path)
return fs.Fsync(path, datasync, fh)
}
@@ -393,3 +420,7 @@ func (fs *keepFS) debugPanics() {
panic(err)
}
}
+
+func (fs *keepFS) debugOp(op, path string) {
+ fs.Logger.WithFields(nil).Tracef("fuse call %s %s", op, path)
+}
diff --git a/lib/mount/fs_test.go b/lib/mount/fs_test.go
index fef2c0f069..442af7a998 100644
--- a/lib/mount/fs_test.go
+++ b/lib/mount/fs_test.go
@@ -9,6 +9,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/arvados/cgofuse/fuse"
check "gopkg.in/check.v1"
@@ -37,6 +38,7 @@ func (*FSSuite) TestOpendir(c *check.C) {
var fs fuse.FileSystemInterface = &keepFS{
Client: client,
KeepClient: kc,
+ Logger: ctxlog.TestLogger(c),
}
fs.Init()
errc, fh := fs.Opendir("/by_id")
diff --git a/lib/pam/docker_test.go b/lib/pam/docker_test.go
index fa16b313be..196cb97174 100644
--- a/lib/pam/docker_test.go
+++ b/lib/pam/docker_test.go
@@ -114,7 +114,7 @@ func (s *DockerSuite) runTestClient(c *check.C, args ...string) (stdout, stderr
"-v", s.tmpdir + "/pam_arvados.so:/usr/lib/pam_arvados.so:ro",
"-v", s.tmpdir + "/conffile:/usr/share/pam-configs/arvados:ro",
"-v", s.tmpdir + "/testclient:/testclient:ro",
- "debian:buster",
+ "debian:bullseye",
"/testclient"}, args...)...)
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
diff --git a/lib/pam/fpm-info.sh b/lib/pam/fpm-info.sh
index 43c04a67e2..952fb557c7 100644
--- a/lib/pam/fpm-info.sh
+++ b/lib/pam/fpm-info.sh
@@ -3,5 +3,8 @@
# SPDX-License-Identifier: Apache-2.0
fpm_depends+=(ca-certificates)
+case "$TARGET" in
+ rocky*) fpm_depends+=(pam) ;;
+esac
fpm_args+=(--conflicts=libpam-arvados)
diff --git a/lib/pam/testclient.go b/lib/pam/testclient.go
index 33bd47a357..02a278c0e6 100644
--- a/lib/pam/testclient.go
+++ b/lib/pam/testclient.go
@@ -76,7 +76,7 @@ func main() {
}
err = tx.Authenticate(pam.DisallowNullAuthtok)
if err != nil {
- err = fmt.Errorf("PAM: %s (message = %q)", err, errorMessage)
+ err = fmt.Errorf("PAM: %s (message = %q, sentPassword = %v)", err, errorMessage, sentPassword)
logrus.WithError(err).Print("authentication failed")
os.Exit(1)
}
diff --git a/lib/service/cmd.go b/lib/service/cmd.go
index 20441c2a6c..82e95fe0b4 100644
--- a/lib/service/cmd.go
+++ b/lib/service/cmd.go
@@ -12,10 +12,13 @@ import (
"io"
"net"
"net/http"
+ "net/http/httptest"
_ "net/http/pprof"
"net/url"
"os"
+ "regexp"
"strings"
+ "time"
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/lib/config"
@@ -45,6 +48,8 @@ type command struct {
ctx context.Context // enables tests to shutdown service; no public API yet
}
+var requestQueueDumpCheckInterval = time.Minute
+
// Command returns a cmd.Handler that loads site config, calls
// newHandler with the current cluster and node configs, and brings up
// an http server with the returned handler.
@@ -150,7 +155,7 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
httpserver.Inspect(reg, cluster.ManagementToken,
httpserver.LogRequests(
interceptHealthReqs(cluster.ManagementToken, handler.CheckHealth,
- httpserver.NewRequestLimiter(cluster.API.MaxConcurrentRequests, handler, reg)))))))
+ c.requestLimiter(handler, cluster, reg)))))))
srv := &httpserver.Server{
Server: http.Server{
Handler: ifCollectionInHost(instrumented, instrumented.ServeAPI(cluster.ManagementToken, instrumented)),
@@ -189,6 +194,7 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
<-handler.Done()
srv.Close()
}()
+ go c.requestQueueDumpCheck(cluster, prog, reg, &srv.Server, logger)
err = srv.Wait()
if err != nil {
return 1
@@ -196,6 +202,153 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
return 0
}
+// If SystemLogs.RequestQueueDumpDirectory is set, monitor the
+// server's incoming HTTP request limiters. When the number of
+// concurrent requests in any queue ("api" or "tunnel") exceeds 90% of
+// its maximum slots, write the /_inspect/requests data to a JSON file
+// in the specified directory.
+func (c *command) requestQueueDumpCheck(cluster *arvados.Cluster, prog string, reg *prometheus.Registry, srv *http.Server, logger logrus.FieldLogger) {
+ outdir := cluster.SystemLogs.RequestQueueDumpDirectory
+ if outdir == "" || cluster.ManagementToken == "" {
+ return
+ }
+ logger = logger.WithField("worker", "RequestQueueDump")
+ outfile := outdir + "/" + prog + "-requests.json"
+ for range time.NewTicker(requestQueueDumpCheckInterval).C {
+ mfs, err := reg.Gather()
+ if err != nil {
+ logger.WithError(err).Warn("error getting metrics")
+ continue
+ }
+ cur := map[string]int{} // queue label => current
+ max := map[string]int{} // queue label => max
+ for _, mf := range mfs {
+ for _, m := range mf.GetMetric() {
+ for _, ml := range m.GetLabel() {
+ if ml.GetName() == "queue" {
+ n := int(m.GetGauge().GetValue())
+ if name := mf.GetName(); name == "arvados_concurrent_requests" {
+ cur[*ml.Value] = n
+ } else if name == "arvados_max_concurrent_requests" {
+ max[*ml.Value] = n
+ }
+ }
+ }
+ }
+ }
+ dump := false
+ for queue, n := range cur {
+ if n > 0 && max[queue] > 0 && n >= max[queue]*9/10 {
+ dump = true
+ break
+ }
+ }
+ if dump {
+ req, err := http.NewRequest("GET", "/_inspect/requests", nil)
+ if err != nil {
+ logger.WithError(err).Warn("error in http.NewRequest")
+ continue
+ }
+ req.Header.Set("Authorization", "Bearer "+cluster.ManagementToken)
+ resp := httptest.NewRecorder()
+ srv.Handler.ServeHTTP(resp, req)
+ if code := resp.Result().StatusCode; code != http.StatusOK {
+ logger.WithField("StatusCode", code).Warn("error getting /_inspect/requests")
+ continue
+ }
+ err = os.WriteFile(outfile, resp.Body.Bytes(), 0777)
+ if err != nil {
+ logger.WithError(err).Warn("error writing file")
+ continue
+ }
+ }
+ }
+}
+
+// Set up a httpserver.RequestLimiter with separate queues/streams for
+// API requests (obeying MaxConcurrentRequests etc) and gateway tunnel
+// requests (obeying MaxGatewayTunnels).
+func (c *command) requestLimiter(handler http.Handler, cluster *arvados.Cluster, reg *prometheus.Registry) http.Handler {
+ maxReqs := cluster.API.MaxConcurrentRequests
+ if maxRails := cluster.API.MaxConcurrentRailsRequests; maxRails > 0 &&
+ (maxRails < maxReqs || maxReqs == 0) &&
+ c.svcName == arvados.ServiceNameController {
+ // Ideally, we would accept up to
+ // MaxConcurrentRequests, and apply the
+ // MaxConcurrentRailsRequests limit only for requests
+ // that require calling upstream to RailsAPI. But for
+ // now we make the simplifying assumption that every
+ // controller request causes an upstream RailsAPI
+ // request.
+ maxReqs = maxRails
+ }
+ rqAPI := &httpserver.RequestQueue{
+ Label: "api",
+ MaxConcurrent: maxReqs,
+ MaxQueue: cluster.API.MaxQueuedRequests,
+ MaxQueueTimeForMinPriority: cluster.API.MaxQueueTimeForLockRequests.Duration(),
+ }
+ rqTunnel := &httpserver.RequestQueue{
+ Label: "tunnel",
+ MaxConcurrent: cluster.API.MaxGatewayTunnels,
+ MaxQueue: 0,
+ }
+ return &httpserver.RequestLimiter{
+ Handler: handler,
+ Priority: c.requestPriority,
+ Registry: reg,
+ Queue: func(req *http.Request) *httpserver.RequestQueue {
+ if req.Method == http.MethodPost && reTunnelPath.MatchString(req.URL.Path) {
+ return rqTunnel
+ } else {
+ return rqAPI
+ }
+ },
+ }
+}
+
+// reTunnelPath matches paths of API endpoints that go in the "tunnel"
+// queue.
+var reTunnelPath = regexp.MustCompile(func() string {
+ rePathVar := regexp.MustCompile(`{.*?}`)
+ out := ""
+ for _, endpoint := range []arvados.APIEndpoint{
+ arvados.EndpointContainerGatewayTunnel,
+ arvados.EndpointContainerGatewayTunnelCompat,
+ arvados.EndpointContainerSSH,
+ arvados.EndpointContainerSSHCompat,
+ } {
+ if out != "" {
+ out += "|"
+ }
+ out += `\Q/` + rePathVar.ReplaceAllString(endpoint.Path, `\E[^/]*\Q`) + `\E`
+ }
+ return "^(" + out + ")$"
+}())
+
+func (c *command) requestPriority(req *http.Request, queued time.Time) int64 {
+ switch {
+ case req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, "/arvados/v1/containers/") && strings.HasSuffix(req.URL.Path, "/lock"):
+ // Return 503 immediately instead of queueing. We want
+ // to send feedback to dispatchcloud ASAP to stop
+ // bringing up new containers.
+ return httpserver.MinPriority
+ case req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, "/arvados/v1/logs"):
+ // "Create log entry" is the most harmless kind of
+ // request to drop. Negative priority is called "low"
+ // in aggregate metrics.
+ return -1
+ case req.Header.Get("Origin") != "":
+ // Handle interactive requests first. Positive
+ // priority is called "high" in aggregate metrics.
+ return 1
+ default:
+ // Zero priority is called "normal" in aggregate
+ // metrics.
+ return 0
+ }
+}
+
// If an incoming request's target vhost has an embedded collection
// UUID or PDH, handle it with hTrue, otherwise handle it with
// hFalse.
diff --git a/lib/service/cmd_test.go b/lib/service/cmd_test.go
index 7db9109274..9ead90019e 100644
--- a/lib/service/cmd_test.go
+++ b/lib/service/cmd_test.go
@@ -9,12 +9,16 @@ import (
"bytes"
"context"
"crypto/tls"
+ "encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
+ "strings"
+ "sync"
+ "sync/atomic"
"testing"
"time"
@@ -37,15 +41,19 @@ const (
contextKey key = iota
)
-func (*Suite) TestGetListenAddress(c *check.C) {
+func unusedPort(c *check.C) string {
// Find an available port on the testing host, so the test
// cases don't get confused by "already in use" errors.
listener, err := net.Listen("tcp", ":")
c.Assert(err, check.IsNil)
- _, unusedPort, err := net.SplitHostPort(listener.Addr().String())
- c.Assert(err, check.IsNil)
listener.Close()
+ _, port, err := net.SplitHostPort(listener.Addr().String())
+ c.Assert(err, check.IsNil)
+ return port
+}
+func (*Suite) TestGetListenAddress(c *check.C) {
+ port := unusedPort(c)
defer os.Unsetenv("ARVADOS_SERVICE_INTERNAL_URL")
for idx, trial := range []struct {
// internalURL => listenURL, both with trailing "/"
@@ -58,17 +66,17 @@ func (*Suite) TestGetListenAddress(c *check.C) {
expectInternal string
}{
{
- internalURLs: map[string]string{"http://localhost:" + unusedPort + "/": ""},
- expectListen: "http://localhost:" + unusedPort + "/",
- expectInternal: "http://localhost:" + unusedPort + "/",
+ internalURLs: map[string]string{"http://localhost:" + port + "/": ""},
+ expectListen: "http://localhost:" + port + "/",
+ expectInternal: "http://localhost:" + port + "/",
},
{ // implicit port 80 in InternalURLs
internalURLs: map[string]string{"http://localhost/": ""},
expectErrorMatch: `.*:80: bind: permission denied`,
},
{ // implicit port 443 in InternalURLs
- internalURLs: map[string]string{"https://host.example/": "http://localhost:" + unusedPort + "/"},
- expectListen: "http://localhost:" + unusedPort + "/",
+ internalURLs: map[string]string{"https://host.example/": "http://localhost:" + port + "/"},
+ expectListen: "http://localhost:" + port + "/",
expectInternal: "https://host.example/",
},
{ // implicit port 443 in ListenURL
@@ -83,16 +91,16 @@ func (*Suite) TestGetListenAddress(c *check.C) {
{
internalURLs: map[string]string{
"https://hostname1.example/": "http://localhost:12435/",
- "https://hostname2.example/": "http://localhost:" + unusedPort + "/",
+ "https://hostname2.example/": "http://localhost:" + port + "/",
},
envVar: "https://hostname2.example", // note this works despite missing trailing "/"
- expectListen: "http://localhost:" + unusedPort + "/",
+ expectListen: "http://localhost:" + port + "/",
expectInternal: "https://hostname2.example/",
},
{ // cannot listen on any of the ListenURLs
internalURLs: map[string]string{
- "https://hostname1.example/": "http://1.2.3.4:" + unusedPort + "/",
- "https://hostname2.example/": "http://1.2.3.4:" + unusedPort + "/",
+ "https://hostname1.example/": "http://1.2.3.4:" + port + "/",
+ "https://hostname2.example/": "http://1.2.3.4:" + port + "/",
},
expectErrorMatch: "configuration does not enable the \"arvados-controller\" service on this host",
},
@@ -192,7 +200,232 @@ func (*Suite) TestCommand(c *check.C) {
c.Check(stderr.String(), check.Matches, `(?ms).*"msg":"CheckHealth called".*`)
}
+func (s *Suite) TestTunnelPathRegexp(c *check.C) {
+ c.Check(reTunnelPath.MatchString(`/arvados/v1/connect/zzzzz-dz642-aaaaaaaaaaaaaaa/gateway_tunnel`), check.Equals, true)
+ c.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/gateway_tunnel`), check.Equals, true)
+ c.Check(reTunnelPath.MatchString(`/arvados/v1/connect/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, true)
+ c.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, true)
+ c.Check(reTunnelPath.MatchString(`/blah/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, false)
+ c.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa`), check.Equals, false)
+}
+
+func (s *Suite) TestRequestLimitsAndDumpRequests_Keepweb(c *check.C) {
+ s.testRequestLimitAndDumpRequests(c, arvados.ServiceNameKeepweb, "MaxConcurrentRequests")
+}
+
+func (s *Suite) TestRequestLimitsAndDumpRequests_Controller(c *check.C) {
+ s.testRequestLimitAndDumpRequests(c, arvados.ServiceNameController, "MaxConcurrentRailsRequests")
+}
+
+func (*Suite) testRequestLimitAndDumpRequests(c *check.C, serviceName arvados.ServiceName, maxReqsConfigKey string) {
+ defer func(orig time.Duration) { requestQueueDumpCheckInterval = orig }(requestQueueDumpCheckInterval)
+ requestQueueDumpCheckInterval = time.Second / 10
+
+ port := unusedPort(c)
+ tmpdir := c.MkDir()
+ cf, err := ioutil.TempFile(tmpdir, "cmd_test.")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(cf.Name())
+ defer cf.Close()
+
+ max := 24
+ maxTunnels := 30
+ fmt.Fprintf(cf, `
+Clusters:
+ zzzzz:
+ SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ ManagementToken: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+ API:
+ `+maxReqsConfigKey+`: %d
+ MaxQueuedRequests: 1
+ MaxGatewayTunnels: %d
+ SystemLogs: {RequestQueueDumpDirectory: %q}
+ Services:
+ Controller:
+ ExternalURL: "http://localhost:`+port+`"
+ InternalURLs: {"http://localhost:`+port+`": {}}
+ WebDAV:
+ ExternalURL: "http://localhost:`+port+`"
+ InternalURLs: {"http://localhost:`+port+`": {}}
+`, max, maxTunnels, tmpdir)
+ cf.Close()
+
+ started := make(chan bool, max+1)
+ hold := make(chan bool)
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.URL.Path, "/ssh") || strings.Contains(r.URL.Path, "/gateway_tunnel") {
+ <-hold
+ } else {
+ started <- true
+ <-hold
+ }
+ })
+ healthCheck := make(chan bool, 1)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ cmd := Command(serviceName, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {
+ return &testHandler{ctx: ctx, handler: handler, healthCheck: healthCheck}
+ })
+ cmd.(*command).ctx = context.WithValue(ctx, contextKey, "bar")
+
+ exited := make(chan bool)
+ var stdin, stdout, stderr bytes.Buffer
+
+ go func() {
+ cmd.RunCommand(string(serviceName), []string{"-config", cf.Name()}, &stdin, &stdout, &stderr)
+ close(exited)
+ }()
+ select {
+ case <-healthCheck:
+ case <-exited:
+ c.Logf("%s", stderr.String())
+ c.Error("command exited without health check")
+ }
+ client := http.Client{}
+ deadline := time.Now().Add(time.Second * 2)
+ var activeReqs sync.WaitGroup
+
+ // Start some API reqs
+ var apiResp200, apiResp503 int64
+ for i := 0; i < max+1; i++ {
+ activeReqs.Add(1)
+ go func() {
+ defer activeReqs.Done()
+ target := "http://localhost:" + port + "/testpath"
+ resp, err := client.Get(target)
+ for err != nil && strings.Contains(err.Error(), "dial tcp") && deadline.After(time.Now()) {
+ time.Sleep(time.Second / 100)
+ resp, err = client.Get(target)
+ }
+ if c.Check(err, check.IsNil) {
+ if resp.StatusCode == http.StatusOK {
+ atomic.AddInt64(&apiResp200, 1)
+ } else if resp.StatusCode == http.StatusServiceUnavailable {
+ atomic.AddInt64(&apiResp503, 1)
+ }
+ }
+ }()
+ }
+
+ // Start some gateway tunnel reqs that don't count toward our
+ // API req limit
+ extraTunnelReqs := 20
+ var tunnelResp200, tunnelResp503 int64
+ var paths = []string{
+ "/" + strings.Replace(arvados.EndpointContainerSSH.Path, "{uuid}", "z1234-dz642-abcdeabcdeabcde", -1),
+ "/" + strings.Replace(arvados.EndpointContainerSSHCompat.Path, "{uuid}", "z1234-dz642-abcdeabcdeabcde", -1),
+ "/" + strings.Replace(arvados.EndpointContainerGatewayTunnel.Path, "{uuid}", "z1234-dz642-abcdeabcdeabcde", -1),
+ "/" + strings.Replace(arvados.EndpointContainerGatewayTunnelCompat.Path, "{uuid}", "z1234-dz642-abcdeabcdeabcde", -1),
+ }
+ for i := 0; i < maxTunnels+extraTunnelReqs; i++ {
+ i := i
+ activeReqs.Add(1)
+ go func() {
+ defer activeReqs.Done()
+ target := "http://localhost:" + port + paths[i%len(paths)]
+ resp, err := client.Post(target, "application/octet-stream", nil)
+ for err != nil && strings.Contains(err.Error(), "dial tcp") && deadline.After(time.Now()) {
+ time.Sleep(time.Second / 100)
+ resp, err = client.Post(target, "application/octet-stream", nil)
+ }
+ if c.Check(err, check.IsNil) {
+ if resp.StatusCode == http.StatusOK {
+ atomic.AddInt64(&tunnelResp200, 1)
+ } else if resp.StatusCode == http.StatusServiceUnavailable {
+ atomic.AddInt64(&tunnelResp503, 1)
+ } else {
+ c.Errorf("tunnel response code %d", resp.StatusCode)
+ }
+ }
+ }()
+ }
+ for i := 0; i < max; i++ {
+ select {
+ case <-started:
+ case <-time.After(time.Second):
+ c.Logf("%s", stderr.String())
+ c.Logf("apiResp200 %d", apiResp200)
+ c.Logf("apiResp503 %d", apiResp503)
+ c.Logf("tunnelResp200 %d", tunnelResp200)
+ c.Logf("tunnelResp503 %d", tunnelResp503)
+ c.Fatal("timed out")
+ }
+ }
+ for delay := time.Second / 100; ; delay = delay * 2 {
+ time.Sleep(delay)
+ j, err := os.ReadFile(tmpdir + "/" + string(serviceName) + "-requests.json")
+ if os.IsNotExist(err) && deadline.After(time.Now()) {
+ continue
+ }
+ c.Assert(err, check.IsNil)
+ c.Logf("stderr:\n%s", stderr.String())
+ c.Logf("json:\n%s", string(j))
+
+ var loaded []struct{ URL string }
+ err = json.Unmarshal(j, &loaded)
+ c.Check(err, check.IsNil)
+
+ for i := 0; i < len(loaded); i++ {
+ if strings.Contains(loaded[i].URL, "/ssh") || strings.Contains(loaded[i].URL, "/gateway_tunnel") {
+ // Filter out a gateway tunnel req
+ // that doesn't count toward our API
+ // req limit
+ if i < len(loaded)-1 {
+ copy(loaded[i:], loaded[i+1:])
+ i--
+ }
+ loaded = loaded[:len(loaded)-1]
+ }
+ }
+
+ if len(loaded) < max {
+ // Dumped when #requests was >90% but <100% of
+ // limit. If we stop now, we won't be able to
+ // confirm (below) that management endpoints
+ // are still accessible when normal requests
+ // are at 100%.
+ c.Logf("loaded dumped requests, but len %d < max %d -- still waiting", len(loaded), max)
+ continue
+ }
+ c.Check(loaded, check.HasLen, max+1)
+ c.Check(loaded[0].URL, check.Equals, "/testpath")
+ break
+ }
+
+ for _, path := range []string{"/_inspect/requests", "/metrics"} {
+ req, err := http.NewRequest("GET", "http://localhost:"+port+""+path, nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+ resp, err := client.Do(req)
+ if !c.Check(err, check.IsNil) {
+ break
+ }
+ c.Logf("got response for %s", path)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ buf, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ switch path {
+ case "/metrics":
+ c.Check(string(buf), check.Matches, `(?ms).*arvados_concurrent_requests{queue="api"} `+fmt.Sprintf("%d", max)+`\n.*`)
+ c.Check(string(buf), check.Matches, `(?ms).*arvados_queued_requests{priority="normal",queue="api"} 1\n.*`)
+ case "/_inspect/requests":
+ c.Check(string(buf), check.Matches, `(?ms).*"URL":"/testpath".*`)
+ default:
+ c.Error("oops, testing bug")
+ }
+ }
+ close(hold)
+ activeReqs.Wait()
+ c.Check(int(apiResp200), check.Equals, max+1)
+ c.Check(int(apiResp503), check.Equals, 0)
+ c.Check(int(tunnelResp200), check.Equals, maxTunnels)
+ c.Check(int(tunnelResp503), check.Equals, extraTunnelReqs)
+ cancel()
+}
+
func (*Suite) TestTLS(c *check.C) {
+ port := unusedPort(c)
cwd, err := os.Getwd()
c.Assert(err, check.IsNil)
@@ -202,8 +435,8 @@ Clusters:
SystemRootToken: abcde
Services:
Controller:
- ExternalURL: "https://localhost:12345"
- InternalURLs: {"https://localhost:12345": {}}
+ ExternalURL: "https://localhost:` + port + `"
+ InternalURLs: {"https://localhost:` + port + `": {}}
TLS:
Key: file://` + cwd + `/../../services/api/tmp/self-signed.key
Certificate: file://` + cwd + `/../../services/api/tmp/self-signed.pem
@@ -228,7 +461,7 @@ Clusters:
defer close(got)
client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
for range time.NewTicker(time.Millisecond).C {
- resp, err := client.Get("https://localhost:12345")
+ resp, err := client.Get("https://localhost:" + port)
if err != nil {
c.Log(err)
continue
diff --git a/lib/webdavfs/fs.go b/lib/webdavfs/fs.go
new file mode 100644
index 0000000000..eaa1a5a0c7
--- /dev/null
+++ b/lib/webdavfs/fs.go
@@ -0,0 +1,180 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package webdavfs adds special behaviors to an arvados.FileSystem so
+// it's suitable to use with a webdav server.
+package webdavfs
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ prand "math/rand"
+ "os"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "golang.org/x/net/webdav"
+)
+
+var (
+ lockPrefix string = uuid()
+ nextLockSuffix int64 = prand.Int63()
+ ErrReadOnly = errors.New("read-only filesystem")
+)
+
+// FS implements a webdav.FileSystem by wrapping an
+// arvados.CollectionFilesystem.
+type FS struct {
+ FileSystem arvados.FileSystem
+ // Prefix works like fs.Sub: Stat(name) calls
+ // Stat(prefix+name) in the wrapped filesystem.
+ Prefix string
+ // If Writing is false, all write operations return errors.
+ // (Opening a file for writing succeeds -- otherwise webdav
+ // would return 404 -- but writing to it fails.)
+ Writing bool
+ // webdav PROPFIND reads the first few bytes of each file
+ // whose filename extension isn't recognized, which is
+ // prohibitively expensive: we end up fetching multiple 64MiB
+ // blocks. Avoid this by returning EOF on all reads when
+ // handling a PROPFIND.
+ AlwaysReadEOF bool
+}
+
+func (fs *FS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ if !fs.Writing {
+ return ErrReadOnly
+ }
+ name = strings.TrimRight(name, "/")
+ return fs.FileSystem.Mkdir(fs.Prefix+name, 0755)
+}
+
+func (fs *FS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (f webdav.File, err error) {
+ writing := flag&(os.O_WRONLY|os.O_RDWR|os.O_TRUNC) != 0
+ f, err = fs.FileSystem.OpenFile(fs.Prefix+name, flag, perm)
+ if !fs.Writing {
+ // webdav module returns 404 on all OpenFile errors,
+ // but returns 405 Method Not Allowed if OpenFile()
+ // succeeds but Write() or Close() fails. We'd rather
+ // have 405. writeFailer ensures Close() fails if the
+ // file is opened for writing *or* Write() is called.
+ var err error
+ if writing {
+ err = ErrReadOnly
+ }
+ f = writeFailer{File: f, err: err}
+ }
+ if fs.AlwaysReadEOF {
+ f = readEOF{File: f}
+ }
+ return
+}
+
+func (fs *FS) RemoveAll(ctx context.Context, name string) error {
+ return fs.FileSystem.RemoveAll(fs.Prefix + name)
+}
+
+func (fs *FS) Rename(ctx context.Context, oldName, newName string) error {
+ if !fs.Writing {
+ return ErrReadOnly
+ }
+ if strings.HasSuffix(oldName, "/") {
+ // WebDAV "MOVE foo/ bar/" means rename foo to bar.
+ oldName = oldName[:len(oldName)-1]
+ newName = strings.TrimSuffix(newName, "/")
+ }
+ return fs.FileSystem.Rename(fs.Prefix+oldName, fs.Prefix+newName)
+}
+
+func (fs *FS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ return fs.FileSystem.Stat(fs.Prefix + name)
+}
+
+type writeFailer struct {
+ webdav.File
+ err error
+}
+
+func (wf writeFailer) Write([]byte) (int, error) {
+ wf.err = ErrReadOnly
+ return 0, wf.err
+}
+
+func (wf writeFailer) Close() error {
+ err := wf.File.Close()
+ if err != nil {
+ wf.err = err
+ }
+ return wf.err
+}
+
+type readEOF struct {
+ webdav.File
+}
+
+func (readEOF) Read(p []byte) (int, error) {
+ return 0, io.EOF
+}
+
+// NoLockSystem implements webdav.LockSystem by returning success for
+// every possible locking operation, even though it has no side
+// effects such as actually locking anything. This works for a
+// read-only webdav filesystem because webdav locks only apply to
+// writes.
+//
+// This is more suitable than webdav.NewMemLS() for two reasons:
+// First, it allows keep-web to use one locker for all collections
+// even though coll1.vhost/foo and coll2.vhost/foo have the same path
+// but represent different resources. Additionally, it returns valid
+// tokens (rfc2518 specifies that tokens are represented as URIs and
+// are unique across all resources for all time), which might improve
+// client compatibility.
+//
+// However, it does also permit impossible operations, like acquiring
+// conflicting locks and releasing non-existent locks. This might
+// confuse some clients if they try to probe for correctness.
+//
+// Currently this is a moot point: the LOCK and UNLOCK methods are not
+// accepted by keep-web, so it suffices to implement the
+// webdav.LockSystem interface.
+var NoLockSystem = noLockSystem{}
+
+type noLockSystem struct{}
+
+func (noLockSystem) Confirm(time.Time, string, string, ...webdav.Condition) (func(), error) {
+ return noop, nil
+}
+
+func (noLockSystem) Create(now time.Time, details webdav.LockDetails) (token string, err error) {
+ return fmt.Sprintf("opaquelocktoken:%s-%x", lockPrefix, atomic.AddInt64(&nextLockSuffix, 1)), nil
+}
+
+func (noLockSystem) Refresh(now time.Time, token string, duration time.Duration) (webdav.LockDetails, error) {
+ return webdav.LockDetails{}, nil
+}
+
+func (noLockSystem) Unlock(now time.Time, token string) error {
+ return nil
+}
+
+func noop() {}
+
+// Return a version 4 variant 1 UUID, meaning all bits are random
+// except the ones indicating the version and variant.
+func uuid() string {
+ var data [16]byte
+ if _, err := rand.Read(data[:]); err != nil {
+ panic(err)
+ }
+ // variant 1: N=10xx
+ data[8] = data[8]&0x3f | 0x80
+ // version 4: M=0100
+ data[6] = data[6]&0x0f | 0x40
+ return fmt.Sprintf("%x-%x-%x-%x-%x", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])
+}
diff --git a/lib/webdavfs/fs_test.go b/lib/webdavfs/fs_test.go
new file mode 100644
index 0000000000..1a6085d1f5
--- /dev/null
+++ b/lib/webdavfs/fs_test.go
@@ -0,0 +1,9 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package webdavfs
+
+import "golang.org/x/net/webdav"
+
+var _ webdav.FileSystem = &FS{}
diff --git a/sdk/R/DESCRIPTION b/sdk/R/DESCRIPTION
index 9c682412a0..c6c01adebd 100644
--- a/sdk/R/DESCRIPTION
+++ b/sdk/R/DESCRIPTION
@@ -1,15 +1,17 @@
Package: ArvadosR
Type: Package
Title: Arvados R SDK
-Version: 2.4.0
+Version: 2.6.0
Authors@R: c(person("Fuad", "Muhic", role = c("aut", "ctr"), email = "fmuhic@capeannenterprises.com"),
- person("Peter", "Amstutz", role = c("cre"), email = "peter.amstutz@curii.com"))
+ person("Peter", "Amstutz", role = c("cre"), email = "peter.amstutz@curii.com"),
+ person("Piotr", "Nowosielski", role = c("aut"), email = "piotr.nowosielski@contractors.roche.com"),
+ person("Aneta", "Stanczyk", role = c("aut"), email = "aneta.stanczyk@contractors.roche.com"))
Description: This is the Arvados R SDK
URL: http://doc.arvados.org
License: Apache-2.0
Encoding: UTF-8
LazyData: true
-RoxygenNote: 6.0.1.9000
+RoxygenNote: 7.2.3
Imports:
R6,
httr,
diff --git a/sdk/R/R/Arvados.R b/sdk/R/R/Arvados.R
index 52d6c95f5e..ed65d1fc4c 100644
--- a/sdk/R/R/Arvados.R
+++ b/sdk/R/R/Arvados.R
@@ -2,4012 +2,3321 @@
#
# SPDX-License-Identifier: Apache-2.0
-#' api_clients.get
+#' R6 Class Representing an Arvados
#'
-#' api_clients.get is a method defined in Arvados class.
-#'
-#' @usage arv$api_clients.get(uuid)
-#' @param uuid The UUID of the ApiClient in question.
-#' @return ApiClient object.
-#' @name api_clients.get
-NULL
-
-#' api_clients.create
-#'
-#' api_clients.create is a method defined in Arvados class.
-#'
-#' @usage arv$api_clients.create(apiclient,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param apiClient ApiClient object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return ApiClient object.
-#' @name api_clients.create
-NULL
-
-#' api_clients.update
-#'
-#' api_clients.update is a method defined in Arvados class.
-#'
-#' @usage arv$api_clients.update(apiclient,
-#' uuid)
-#' @param apiClient ApiClient object.
-#' @param uuid The UUID of the ApiClient in question.
-#' @return ApiClient object.
-#' @name api_clients.update
-NULL
-
-#' api_clients.delete
-#'
-#' api_clients.delete is a method defined in Arvados class.
-#'
-#' @usage arv$api_clients.delete(uuid)
-#' @param uuid The UUID of the ApiClient in question.
-#' @return ApiClient object.
-#' @name api_clients.delete
-NULL
-
-#' api_clients.list
-#'
-#' api_clients.list is a method defined in Arvados class.
-#'
-#' @usage arv$api_clients.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return ApiClientList object.
-#' @name api_clients.list
-NULL
-
-#' api_client_authorizations.get
-#'
-#' api_client_authorizations.get is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.get(uuid)
-#' @param uuid The UUID of the ApiClientAuthorization in question.
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.get
-NULL
-
-#' api_client_authorizations.create
-#'
-#' api_client_authorizations.create is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.create(apiclientauthorization,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param apiClientAuthorization ApiClientAuthorization object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.create
-NULL
-
-#' api_client_authorizations.update
-#'
-#' api_client_authorizations.update is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.update(apiclientauthorization,
-#' uuid)
-#' @param apiClientAuthorization ApiClientAuthorization object.
-#' @param uuid The UUID of the ApiClientAuthorization in question.
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.update
-NULL
-
-#' api_client_authorizations.delete
-#'
-#' api_client_authorizations.delete is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.delete(uuid)
-#' @param uuid The UUID of the ApiClientAuthorization in question.
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.delete
-NULL
-
-#' api_client_authorizations.create_system_auth
-#'
-#' api_client_authorizations.create_system_auth is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
-#' scopes = NULL)
-#' @param api_client_id
-#' @param scopes
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.create_system_auth
-NULL
-
-#' api_client_authorizations.current
-#'
-#' api_client_authorizations.current is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.current(NULL)
-#' @return ApiClientAuthorization object.
-#' @name api_client_authorizations.current
-NULL
-
-#' api_client_authorizations.list
-#'
-#' api_client_authorizations.list is a method defined in Arvados class.
-#'
-#' @usage arv$api_client_authorizations.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return ApiClientAuthorizationList object.
-#' @name api_client_authorizations.list
-NULL
-
-#' authorized_keys.get
-#'
-#' authorized_keys.get is a method defined in Arvados class.
-#'
-#' @usage arv$authorized_keys.get(uuid)
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.get
-NULL
-
-#' authorized_keys.create
-#'
-#' authorized_keys.create is a method defined in Arvados class.
-#'
-#' @usage arv$authorized_keys.create(authorizedkey,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param authorizedKey AuthorizedKey object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.create
-NULL
-
-#' authorized_keys.update
-#'
-#' authorized_keys.update is a method defined in Arvados class.
-#'
-#' @usage arv$authorized_keys.update(authorizedkey,
-#' uuid)
-#' @param authorizedKey AuthorizedKey object.
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.update
-NULL
-
-#' authorized_keys.delete
-#'
-#' authorized_keys.delete is a method defined in Arvados class.
-#'
-#' @usage arv$authorized_keys.delete(uuid)
-#' @param uuid The UUID of the AuthorizedKey in question.
-#' @return AuthorizedKey object.
-#' @name authorized_keys.delete
-NULL
-
-#' authorized_keys.list
-#'
-#' authorized_keys.list is a method defined in Arvados class.
-#'
-#' @usage arv$authorized_keys.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return AuthorizedKeyList object.
-#' @name authorized_keys.list
-NULL
-
-#' collections.get
-#'
-#' collections.get is a method defined in Arvados class.
-#'
-#' @usage arv$collections.get(uuid)
-#' @param uuid The UUID of the Collection in question.
-#' @return Collection object.
-#' @name collections.get
-NULL
-
-#' collections.create
-#'
-#' collections.create is a method defined in Arvados class.
-#'
-#' @usage arv$collections.create(collection,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param collection Collection object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Collection object.
-#' @name collections.create
-NULL
-
-#' collections.update
-#'
-#' collections.update is a method defined in Arvados class.
-#'
-#' @usage arv$collections.update(collection,
-#' uuid)
-#' @param collection Collection object.
-#' @param uuid The UUID of the Collection in question.
-#' @return Collection object.
-#' @name collections.update
-NULL
-
-#' collections.delete
-#'
-#' collections.delete is a method defined in Arvados class.
-#'
-#' @usage arv$collections.delete(uuid)
-#' @param uuid The UUID of the Collection in question.
-#' @return Collection object.
-#' @name collections.delete
-NULL
-
-#' collections.provenance
-#'
-#' collections.provenance is a method defined in Arvados class.
-#'
-#' @usage arv$collections.provenance(uuid)
-#' @param uuid
-#' @return Collection object.
-#' @name collections.provenance
-NULL
-
-#' collections.used_by
-#'
-#' collections.used_by is a method defined in Arvados class.
-#'
-#' @usage arv$collections.used_by(uuid)
-#' @param uuid
-#' @return Collection object.
-#' @name collections.used_by
-NULL
-
-#' collections.trash
-#'
-#' collections.trash is a method defined in Arvados class.
-#'
-#' @usage arv$collections.trash(uuid)
-#' @param uuid
-#' @return Collection object.
-#' @name collections.trash
-NULL
-
-#' collections.untrash
-#'
-#' collections.untrash is a method defined in Arvados class.
-#'
-#' @usage arv$collections.untrash(uuid)
-#' @param uuid
-#' @return Collection object.
-#' @name collections.untrash
-NULL
-
-#' collections.list
-#'
-#' collections.list is a method defined in Arvados class.
-#'
-#' @usage arv$collections.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL,
-#' include_trash = NULL, include_old_versions = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @param include_trash Include collections whose is_trashed attribute is true.
-#' @param include_old_versions Include past collection versions.
-#' @return CollectionList object.
-#' @name collections.list
-NULL
-
-#' containers.get
-#'
-#' containers.get is a method defined in Arvados class.
-#'
-#' @usage arv$containers.get(uuid)
-#' @param uuid The UUID of the Container in question.
-#' @return Container object.
-#' @name containers.get
-NULL
-
-#' containers.create
-#'
-#' containers.create is a method defined in Arvados class.
-#'
-#' @usage arv$containers.create(container,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param container Container object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Container object.
-#' @name containers.create
-NULL
-
-#' containers.update
-#'
-#' containers.update is a method defined in Arvados class.
-#'
-#' @usage arv$containers.update(container,
-#' uuid)
-#' @param container Container object.
-#' @param uuid The UUID of the Container in question.
-#' @return Container object.
-#' @name containers.update
-NULL
-
-#' containers.delete
-#'
-#' containers.delete is a method defined in Arvados class.
-#'
-#' @usage arv$containers.delete(uuid)
-#' @param uuid The UUID of the Container in question.
-#' @return Container object.
-#' @name containers.delete
-NULL
-
-#' containers.auth
-#'
-#' containers.auth is a method defined in Arvados class.
-#'
-#' @usage arv$containers.auth(uuid)
-#' @param uuid
-#' @return Container object.
-#' @name containers.auth
-NULL
-
-#' containers.lock
-#'
-#' containers.lock is a method defined in Arvados class.
-#'
-#' @usage arv$containers.lock(uuid)
-#' @param uuid
-#' @return Container object.
-#' @name containers.lock
-NULL
-
-#' containers.unlock
-#'
-#' containers.unlock is a method defined in Arvados class.
-#'
-#' @usage arv$containers.unlock(uuid)
-#' @param uuid
-#' @return Container object.
-#' @name containers.unlock
-NULL
-
-#' containers.secret_mounts
-#'
-#' containers.secret_mounts is a method defined in Arvados class.
-#'
-#' @usage arv$containers.secret_mounts(uuid)
-#' @param uuid
-#' @return Container object.
-#' @name containers.secret_mounts
-NULL
-
-#' containers.current
-#'
-#' containers.current is a method defined in Arvados class.
-#'
-#' @usage arv$containers.current(NULL)
-#' @return Container object.
-#' @name containers.current
-NULL
-
-#' containers.list
-#'
-#' containers.list is a method defined in Arvados class.
-#'
-#' @usage arv$containers.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return ContainerList object.
-#' @name containers.list
-NULL
-
-#' container_requests.get
-#'
-#' container_requests.get is a method defined in Arvados class.
-#'
-#' @usage arv$container_requests.get(uuid)
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.get
-NULL
-
-#' container_requests.create
-#'
-#' container_requests.create is a method defined in Arvados class.
-#'
-#' @usage arv$container_requests.create(containerrequest,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param containerRequest ContainerRequest object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return ContainerRequest object.
-#' @name container_requests.create
-NULL
-
-#' container_requests.update
-#'
-#' container_requests.update is a method defined in Arvados class.
-#'
-#' @usage arv$container_requests.update(containerrequest,
-#' uuid)
-#' @param containerRequest ContainerRequest object.
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.update
-NULL
-
-#' container_requests.delete
-#'
-#' container_requests.delete is a method defined in Arvados class.
-#'
-#' @usage arv$container_requests.delete(uuid)
-#' @param uuid The UUID of the ContainerRequest in question.
-#' @return ContainerRequest object.
-#' @name container_requests.delete
-NULL
-
-#' container_requests.list
-#'
-#' container_requests.list is a method defined in Arvados class.
-#'
-#' @usage arv$container_requests.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL,
-#' include_trash = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @param include_trash Include container requests whose owner project is trashed.
-#' @return ContainerRequestList object.
-#' @name container_requests.list
-NULL
-
-#' groups.get
-#'
-#' groups.get is a method defined in Arvados class.
-#'
-#' @usage arv$groups.get(uuid)
-#' @param uuid The UUID of the Group in question.
-#' @return Group object.
-#' @name groups.get
-NULL
-
-#' groups.create
-#'
-#' groups.create is a method defined in Arvados class.
-#'
-#' @usage arv$groups.create(group, ensure_unique_name = "false",
-#' cluster_id = NULL, async = "false")
-#' @param group Group object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @param async defer permissions update
-#' @return Group object.
-#' @name groups.create
-NULL
-
-#' groups.update
-#'
-#' groups.update is a method defined in Arvados class.
-#'
-#' @usage arv$groups.update(group, uuid,
-#' async = "false")
-#' @param group Group object.
-#' @param uuid The UUID of the Group in question.
-#' @param async defer permissions update
-#' @return Group object.
-#' @name groups.update
-NULL
-
-#' groups.delete
-#'
-#' groups.delete is a method defined in Arvados class.
-#'
-#' @usage arv$groups.delete(uuid)
-#' @param uuid The UUID of the Group in question.
-#' @return Group object.
-#' @name groups.delete
-NULL
-
-#' groups.contents
-#'
-#' groups.contents is a method defined in Arvados class.
-#'
-#' @usage arv$groups.contents(filters = NULL,
-#' where = NULL, order = NULL, distinct = NULL,
-#' limit = "100", offset = "0", count = "exact",
-#' cluster_id = NULL, bypass_federation = NULL,
-#' include_trash = NULL, uuid = NULL, recursive = NULL,
-#' include = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @param include_trash Include items whose is_trashed attribute is true.
-#' @param uuid
-#' @param recursive Include contents from child groups recursively.
-#' @param include Include objects referred to by listed field in "included" (only owner_uuid)
-#' @return Group object.
-#' @name groups.contents
-NULL
-
-#' groups.shared
-#'
-#' groups.shared is a method defined in Arvados class.
-#'
-#' @usage arv$groups.shared(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL,
-#' include_trash = NULL, include = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @param include_trash Include items whose is_trashed attribute is true.
-#' @param include
-#' @return Group object.
-#' @name groups.shared
-NULL
-
-#' groups.trash
-#'
-#' groups.trash is a method defined in Arvados class.
-#'
-#' @usage arv$groups.trash(uuid)
-#' @param uuid
-#' @return Group object.
-#' @name groups.trash
-NULL
-
-#' groups.untrash
-#'
-#' groups.untrash is a method defined in Arvados class.
-#'
-#' @usage arv$groups.untrash(uuid)
-#' @param uuid
-#' @return Group object.
-#' @name groups.untrash
-NULL
-
-#' groups.list
-#'
-#' groups.list is a method defined in Arvados class.
-#'
-#' @usage arv$groups.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL,
-#' include_trash = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @param include_trash Include items whose is_trashed attribute is true.
-#' @return GroupList object.
-#' @name groups.list
-NULL
-
-#' keep_services.get
-#'
-#' keep_services.get is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.get(uuid)
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.get
-NULL
-
-#' keep_services.create
-#'
-#' keep_services.create is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.create(keepservice,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param keepService KeepService object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return KeepService object.
-#' @name keep_services.create
-NULL
-
-#' keep_services.update
-#'
-#' keep_services.update is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.update(keepservice,
-#' uuid)
-#' @param keepService KeepService object.
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.update
-NULL
-
-#' keep_services.delete
-#'
-#' keep_services.delete is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.delete(uuid)
-#' @param uuid The UUID of the KeepService in question.
-#' @return KeepService object.
-#' @name keep_services.delete
-NULL
-
-#' keep_services.accessible
-#'
-#' keep_services.accessible is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.accessible(NULL)
-#' @return KeepService object.
-#' @name keep_services.accessible
-NULL
-
-#' keep_services.list
-#'
-#' keep_services.list is a method defined in Arvados class.
-#'
-#' @usage arv$keep_services.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return KeepServiceList object.
-#' @name keep_services.list
-NULL
-
-#' links.get
-#'
-#' links.get is a method defined in Arvados class.
-#'
-#' @usage arv$links.get(uuid)
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.get
-NULL
-
-#' links.create
-#'
-#' links.create is a method defined in Arvados class.
-#'
-#' @usage arv$links.create(link, ensure_unique_name = "false",
-#' cluster_id = NULL)
-#' @param link Link object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Link object.
-#' @name links.create
-NULL
-
-#' links.update
-#'
-#' links.update is a method defined in Arvados class.
-#'
-#' @usage arv$links.update(link, uuid)
-#' @param link Link object.
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.update
-NULL
-
-#' links.delete
-#'
-#' links.delete is a method defined in Arvados class.
-#'
-#' @usage arv$links.delete(uuid)
-#' @param uuid The UUID of the Link in question.
-#' @return Link object.
-#' @name links.delete
-NULL
-
-#' links.list
-#'
-#' links.list is a method defined in Arvados class.
-#'
-#' @usage arv$links.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return LinkList object.
-#' @name links.list
-NULL
-
-#' links.get_permissions
-#'
-#' links.get_permissions is a method defined in Arvados class.
-#'
-#' @usage arv$links.get_permissions(uuid)
-#' @param uuid
-#' @return Link object.
-#' @name links.get_permissions
-NULL
-
-#' logs.get
-#'
-#' logs.get is a method defined in Arvados class.
-#'
-#' @usage arv$logs.get(uuid)
-#' @param uuid The UUID of the Log in question.
-#' @return Log object.
-#' @name logs.get
-NULL
-
-#' logs.create
-#'
-#' logs.create is a method defined in Arvados class.
-#'
-#' @usage arv$logs.create(log, ensure_unique_name = "false",
-#' cluster_id = NULL)
-#' @param log Log object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Log object.
-#' @name logs.create
-NULL
-
-#' logs.update
-#'
-#' logs.update is a method defined in Arvados class.
-#'
-#' @usage arv$logs.update(log, uuid)
-#' @param log Log object.
-#' @param uuid The UUID of the Log in question.
-#' @return Log object.
-#' @name logs.update
-NULL
-
-#' logs.delete
-#'
-#' logs.delete is a method defined in Arvados class.
-#'
-#' @usage arv$logs.delete(uuid)
-#' @param uuid The UUID of the Log in question.
-#' @return Log object.
-#' @name logs.delete
-NULL
-
-#' logs.list
-#'
-#' logs.list is a method defined in Arvados class.
-#'
-#' @usage arv$logs.list(filters = NULL, where = NULL,
-#' order = NULL, select = NULL, distinct = NULL,
-#' limit = "100", offset = "0", count = "exact",
-#' cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return LogList object.
-#' @name logs.list
-NULL
-
-#' users.get
-#'
-#' users.get is a method defined in Arvados class.
-#'
-#' @usage arv$users.get(uuid)
-#' @param uuid The UUID of the User in question.
-#' @return User object.
-#' @name users.get
-NULL
-
-#' users.create
-#'
-#' users.create is a method defined in Arvados class.
-#'
-#' @usage arv$users.create(user, ensure_unique_name = "false",
-#' cluster_id = NULL)
-#' @param user User object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return User object.
-#' @name users.create
-NULL
-
-#' users.update
-#'
-#' users.update is a method defined in Arvados class.
-#'
-#' @usage arv$users.update(user, uuid, bypass_federation = NULL)
-#' @param user User object.
-#' @param uuid The UUID of the User in question.
-#' @param bypass_federation
-#' @return User object.
-#' @name users.update
-NULL
-
-#' users.delete
-#'
-#' users.delete is a method defined in Arvados class.
-#'
-#' @usage arv$users.delete(uuid)
-#' @param uuid The UUID of the User in question.
-#' @return User object.
-#' @name users.delete
-NULL
-
-#' users.current
-#'
-#' users.current is a method defined in Arvados class.
-#'
-#' @usage arv$users.current(NULL)
-#' @return User object.
-#' @name users.current
-NULL
-
-#' users.system
-#'
-#' users.system is a method defined in Arvados class.
-#'
-#' @usage arv$users.system(NULL)
-#' @return User object.
-#' @name users.system
-NULL
-
-#' users.activate
-#'
-#' users.activate is a method defined in Arvados class.
-#'
-#' @usage arv$users.activate(uuid)
-#' @param uuid
-#' @return User object.
-#' @name users.activate
-NULL
-
-#' users.setup
-#'
-#' users.setup is a method defined in Arvados class.
-#'
-#' @usage arv$users.setup(uuid = NULL, user = NULL,
-#' repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
-#' @param uuid
-#' @param user
-#' @param repo_name
-#' @param vm_uuid
-#' @param send_notification_email
-#' @return User object.
-#' @name users.setup
-NULL
-
-#' users.unsetup
-#'
-#' users.unsetup is a method defined in Arvados class.
-#'
-#' @usage arv$users.unsetup(uuid)
-#' @param uuid
-#' @return User object.
-#' @name users.unsetup
-NULL
-
-#' users.merge
-#'
-#' users.merge is a method defined in Arvados class.
-#'
-#' @usage arv$users.merge(new_owner_uuid,
-#' new_user_token = NULL, redirect_to_new_user = NULL,
-#' old_user_uuid = NULL, new_user_uuid = NULL)
-#' @param new_owner_uuid
-#' @param new_user_token
-#' @param redirect_to_new_user
-#' @param old_user_uuid
-#' @param new_user_uuid
-#' @return User object.
-#' @name users.merge
-NULL
-
-#' users.list
-#'
-#' users.list is a method defined in Arvados class.
-#'
-#' @usage arv$users.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return UserList object.
-#' @name users.list
-NULL
-
-#' repositories.get
-#'
-#' repositories.get is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.get(uuid)
-#' @param uuid The UUID of the Repository in question.
-#' @return Repository object.
-#' @name repositories.get
-NULL
-
-#' repositories.create
-#'
-#' repositories.create is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.create(repository,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param repository Repository object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Repository object.
-#' @name repositories.create
-NULL
-
-#' repositories.update
-#'
-#' repositories.update is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.update(repository,
-#' uuid)
-#' @param repository Repository object.
-#' @param uuid The UUID of the Repository in question.
-#' @return Repository object.
-#' @name repositories.update
-NULL
-
-#' repositories.delete
-#'
-#' repositories.delete is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.delete(uuid)
-#' @param uuid The UUID of the Repository in question.
-#' @return Repository object.
-#' @name repositories.delete
-NULL
-
-#' repositories.get_all_permissions
-#'
-#' repositories.get_all_permissions is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.get_all_permissions(NULL)
-#' @return Repository object.
-#' @name repositories.get_all_permissions
-NULL
-
-#' repositories.list
-#'
-#' repositories.list is a method defined in Arvados class.
-#'
-#' @usage arv$repositories.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return RepositoryList object.
-#' @name repositories.list
-NULL
-
-#' virtual_machines.get
-#'
-#' virtual_machines.get is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.get(uuid)
-#' @param uuid The UUID of the VirtualMachine in question.
-#' @return VirtualMachine object.
-#' @name virtual_machines.get
-NULL
-
-#' virtual_machines.create
-#'
-#' virtual_machines.create is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.create(virtualmachine,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param virtualMachine VirtualMachine object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return VirtualMachine object.
-#' @name virtual_machines.create
-NULL
-
-#' virtual_machines.update
-#'
-#' virtual_machines.update is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.update(virtualmachine,
-#' uuid)
-#' @param virtualMachine VirtualMachine object.
-#' @param uuid The UUID of the VirtualMachine in question.
-#' @return VirtualMachine object.
-#' @name virtual_machines.update
-NULL
-
-#' virtual_machines.delete
-#'
-#' virtual_machines.delete is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.delete(uuid)
-#' @param uuid The UUID of the VirtualMachine in question.
-#' @return VirtualMachine object.
-#' @name virtual_machines.delete
-NULL
-
-#' virtual_machines.logins
-#'
-#' virtual_machines.logins is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.logins(uuid)
-#' @param uuid
-#' @return VirtualMachine object.
-#' @name virtual_machines.logins
-NULL
-
-#' virtual_machines.get_all_logins
-#'
-#' virtual_machines.get_all_logins is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.get_all_logins(NULL)
-#' @return VirtualMachine object.
-#' @name virtual_machines.get_all_logins
-NULL
-
-#' virtual_machines.list
-#'
-#' virtual_machines.list is a method defined in Arvados class.
-#'
-#' @usage arv$virtual_machines.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return VirtualMachineList object.
-#' @name virtual_machines.list
-NULL
-
-#' workflows.get
-#'
-#' workflows.get is a method defined in Arvados class.
-#'
-#' @usage arv$workflows.get(uuid)
-#' @param uuid The UUID of the Workflow in question.
-#' @return Workflow object.
-#' @name workflows.get
-NULL
-
-#' workflows.create
-#'
-#' workflows.create is a method defined in Arvados class.
-#'
-#' @usage arv$workflows.create(workflow,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param workflow Workflow object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return Workflow object.
-#' @name workflows.create
-NULL
-
-#' workflows.update
-#'
-#' workflows.update is a method defined in Arvados class.
-#'
-#' @usage arv$workflows.update(workflow,
-#' uuid)
-#' @param workflow Workflow object.
-#' @param uuid The UUID of the Workflow in question.
-#' @return Workflow object.
-#' @name workflows.update
-NULL
-
-#' workflows.delete
-#'
-#' workflows.delete is a method defined in Arvados class.
-#'
-#' @usage arv$workflows.delete(uuid)
-#' @param uuid The UUID of the Workflow in question.
-#' @return Workflow object.
-#' @name workflows.delete
-NULL
-
-#' workflows.list
-#'
-#' workflows.list is a method defined in Arvados class.
-#'
-#' @usage arv$workflows.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return WorkflowList object.
-#' @name workflows.list
-NULL
-
-#' user_agreements.get
-#'
-#' user_agreements.get is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.get(uuid)
-#' @param uuid The UUID of the UserAgreement in question.
-#' @return UserAgreement object.
-#' @name user_agreements.get
-NULL
-
-#' user_agreements.create
-#'
-#' user_agreements.create is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.create(useragreement,
-#' ensure_unique_name = "false", cluster_id = NULL)
-#' @param userAgreement UserAgreement object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @param cluster_id Create object on a remote federated cluster instead of the current one.
-#' @return UserAgreement object.
-#' @name user_agreements.create
-NULL
-
-#' user_agreements.update
-#'
-#' user_agreements.update is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.update(useragreement,
-#' uuid)
-#' @param userAgreement UserAgreement object.
-#' @param uuid The UUID of the UserAgreement in question.
-#' @return UserAgreement object.
-#' @name user_agreements.update
-NULL
-
-#' user_agreements.delete
-#'
-#' user_agreements.delete is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.delete(uuid)
-#' @param uuid The UUID of the UserAgreement in question.
-#' @return UserAgreement object.
-#' @name user_agreements.delete
-NULL
-
-#' user_agreements.signatures
-#'
-#' user_agreements.signatures is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.signatures(NULL)
-#' @return UserAgreement object.
-#' @name user_agreements.signatures
-NULL
+#' @description
+#' The Arvados class gives users the ability to access the Arvados REST API. It also allows the user to manipulate collections and projects.
-#' user_agreements.sign
-#'
-#' user_agreements.sign is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.sign(NULL)
-#' @return UserAgreement object.
-#' @name user_agreements.sign
-NULL
-
-#' user_agreements.list
-#'
-#' user_agreements.list is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.list(filters = NULL,
-#' where = NULL, order = NULL, select = NULL,
-#' distinct = NULL, limit = "100", offset = "0",
-#' count = "exact", cluster_id = NULL, bypass_federation = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param select
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param cluster_id List objects on a remote federated cluster instead of the current one.
-#' @param bypass_federation bypass federation behavior, list items from local instance database only
-#' @return UserAgreementList object.
-#' @name user_agreements.list
-NULL
-
-#' user_agreements.new
-#'
-#' user_agreements.new is a method defined in Arvados class.
-#'
-#' @usage arv$user_agreements.new(NULL)
-#' @return UserAgreement object.
-#' @name user_agreements.new
-NULL
-
-#' configs.get
-#'
-#' configs.get is a method defined in Arvados class.
-#'
-#' @usage arv$configs.get(NULL)
-#' @return object.
-#' @name configs.get
-NULL
-
-#' project.get
-#'
-#' projects.get is equivalent to groups.get method.
-#'
-#' @usage arv$projects.get(uuid)
-#' @param uuid The UUID of the Group in question.
-#' @return Group object.
-#' @name projects.get
-NULL
-
-#' project.create
-#'
-#' projects.create wrapps groups.create method by setting group_class attribute to "project".
-#'
-#' @usage arv$projects.create(group, ensure_unique_name = "false")
-#' @param group Group object.
-#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
-#' @return Group object.
-#' @name projects.create
-NULL
-
-#' project.update
-#'
-#' projects.update wrapps groups.update method by setting group_class attribute to "project".
-#'
-#' @usage arv$projects.update(group, uuid)
-#' @param group Group object.
-#' @param uuid The UUID of the Group in question.
-#' @return Group object.
-#' @name projects.update
-NULL
-
-#' project.delete
-#'
-#' projects.delete is equivalent to groups.delete method.
-#'
-#' @usage arv$project.delete(uuid)
-#' @param uuid The UUID of the Group in question.
-#' @return Group object.
-#' @name projects.delete
-NULL
-
-#' project.list
-#'
-#' projects.list wrapps groups.list method by setting group_class attribute to "project".
-#'
-#' @usage arv$projects.list(filters = NULL,
-#' where = NULL, order = NULL, distinct = NULL,
-#' limit = "100", offset = "0", count = "exact",
-#' include_trash = NULL, uuid = NULL, recursive = NULL)
-#' @param filters
-#' @param where
-#' @param order
-#' @param distinct
-#' @param limit
-#' @param offset
-#' @param count
-#' @param include_trash Include items whose is_trashed attribute is true.
-#' @param uuid
-#' @param recursive Include contents from child groups recursively.
-#' @return Group object.
-#' @name projects.list
-NULL
-
-#' Arvados
-#'
-#' Arvados class gives users ability to access Arvados REST API.
-#'
-#' @section Usage:
-#' \preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
-#'
-#' @section Arguments:
-#' \describe{
-#' \item{authToken}{Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.}
-#' \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
-#' \item{numRetries}{Number which specifies how many times to retry failed service requests.}
-#' }
-#'
-#' @section Methods:
-#' \describe{
-#' \item{}{\code{\link{api_client_authorizations.create}}}
-#' \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
-#' \item{}{\code{\link{api_client_authorizations.current}}}
-#' \item{}{\code{\link{api_client_authorizations.delete}}}
-#' \item{}{\code{\link{api_client_authorizations.get}}}
-#' \item{}{\code{\link{api_client_authorizations.list}}}
-#' \item{}{\code{\link{api_client_authorizations.update}}}
-#' \item{}{\code{\link{api_clients.create}}}
-#' \item{}{\code{\link{api_clients.delete}}}
-#' \item{}{\code{\link{api_clients.get}}}
-#' \item{}{\code{\link{api_clients.list}}}
-#' \item{}{\code{\link{api_clients.update}}}
-#' \item{}{\code{\link{authorized_keys.create}}}
-#' \item{}{\code{\link{authorized_keys.delete}}}
-#' \item{}{\code{\link{authorized_keys.get}}}
-#' \item{}{\code{\link{authorized_keys.list}}}
-#' \item{}{\code{\link{authorized_keys.update}}}
-#' \item{}{\code{\link{collections.create}}}
-#' \item{}{\code{\link{collections.delete}}}
-#' \item{}{\code{\link{collections.get}}}
-#' \item{}{\code{\link{collections.list}}}
-#' \item{}{\code{\link{collections.provenance}}}
-#' \item{}{\code{\link{collections.trash}}}
-#' \item{}{\code{\link{collections.untrash}}}
-#' \item{}{\code{\link{collections.update}}}
-#' \item{}{\code{\link{collections.used_by}}}
-#' \item{}{\code{\link{configs.get}}}
-#' \item{}{\code{\link{container_requests.create}}}
-#' \item{}{\code{\link{container_requests.delete}}}
-#' \item{}{\code{\link{container_requests.get}}}
-#' \item{}{\code{\link{container_requests.list}}}
-#' \item{}{\code{\link{container_requests.update}}}
-#' \item{}{\code{\link{containers.auth}}}
-#' \item{}{\code{\link{containers.create}}}
-#' \item{}{\code{\link{containers.current}}}
-#' \item{}{\code{\link{containers.delete}}}
-#' \item{}{\code{\link{containers.get}}}
-#' \item{}{\code{\link{containers.list}}}
-#' \item{}{\code{\link{containers.lock}}}
-#' \item{}{\code{\link{containers.secret_mounts}}}
-#' \item{}{\code{\link{containers.unlock}}}
-#' \item{}{\code{\link{containers.update}}}
-#' \item{}{\code{\link{groups.contents}}}
-#' \item{}{\code{\link{groups.create}}}
-#' \item{}{\code{\link{groups.delete}}}
-#' \item{}{\code{\link{groups.get}}}
-#' \item{}{\code{\link{groups.list}}}
-#' \item{}{\code{\link{groups.shared}}}
-#' \item{}{\code{\link{groups.trash}}}
-#' \item{}{\code{\link{groups.untrash}}}
-#' \item{}{\code{\link{groups.update}}}
-#' \item{}{\code{\link{keep_services.accessible}}}
-#' \item{}{\code{\link{keep_services.create}}}
-#' \item{}{\code{\link{keep_services.delete}}}
-#' \item{}{\code{\link{keep_services.get}}}
-#' \item{}{\code{\link{keep_services.list}}}
-#' \item{}{\code{\link{keep_services.update}}}
-#' \item{}{\code{\link{links.create}}}
-#' \item{}{\code{\link{links.delete}}}
-#' \item{}{\code{\link{links.get}}}
-#' \item{}{\code{\link{links.get_permissions}}}
-#' \item{}{\code{\link{links.list}}}
-#' \item{}{\code{\link{links.update}}}
-#' \item{}{\code{\link{logs.create}}}
-#' \item{}{\code{\link{logs.delete}}}
-#' \item{}{\code{\link{logs.get}}}
-#' \item{}{\code{\link{logs.list}}}
-#' \item{}{\code{\link{logs.update}}}
-#' \item{}{\code{\link{projects.create}}}
-#' \item{}{\code{\link{projects.delete}}}
-#' \item{}{\code{\link{projects.get}}}
-#' \item{}{\code{\link{projects.list}}}
-#' \item{}{\code{\link{projects.update}}}
-#' \item{}{\code{\link{repositories.create}}}
-#' \item{}{\code{\link{repositories.delete}}}
-#' \item{}{\code{\link{repositories.get}}}
-#' \item{}{\code{\link{repositories.get_all_permissions}}}
-#' \item{}{\code{\link{repositories.list}}}
-#' \item{}{\code{\link{repositories.update}}}
-#' \item{}{\code{\link{user_agreements.create}}}
-#' \item{}{\code{\link{user_agreements.delete}}}
-#' \item{}{\code{\link{user_agreements.get}}}
-#' \item{}{\code{\link{user_agreements.list}}}
-#' \item{}{\code{\link{user_agreements.new}}}
-#' \item{}{\code{\link{user_agreements.sign}}}
-#' \item{}{\code{\link{user_agreements.signatures}}}
-#' \item{}{\code{\link{user_agreements.update}}}
-#' \item{}{\code{\link{users.activate}}}
-#' \item{}{\code{\link{users.create}}}
-#' \item{}{\code{\link{users.current}}}
-#' \item{}{\code{\link{users.delete}}}
-#' \item{}{\code{\link{users.get}}}
-#' \item{}{\code{\link{users.list}}}
-#' \item{}{\code{\link{users.merge}}}
-#' \item{}{\code{\link{users.setup}}}
-#' \item{}{\code{\link{users.system}}}
-#' \item{}{\code{\link{users.unsetup}}}
-#' \item{}{\code{\link{users.update}}}
-#' \item{}{\code{\link{virtual_machines.create}}}
-#' \item{}{\code{\link{virtual_machines.delete}}}
-#' \item{}{\code{\link{virtual_machines.get}}}
-#' \item{}{\code{\link{virtual_machines.get_all_logins}}}
-#' \item{}{\code{\link{virtual_machines.list}}}
-#' \item{}{\code{\link{virtual_machines.logins}}}
-#' \item{}{\code{\link{virtual_machines.update}}}
-#' \item{}{\code{\link{workflows.create}}}
-#' \item{}{\code{\link{workflows.delete}}}
-#' \item{}{\code{\link{workflows.get}}}
-#' \item{}{\code{\link{workflows.list}}}
-#' \item{}{\code{\link{workflows.update}}}
-#' }
-#'
-#' @name Arvados
-#' @examples
-#' \dontrun{
-#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-#'
-#' collection <- arv$collections.get("uuid")
-#'
-#' collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
-#' collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
-#'
-#' deletedCollection <- arv$collections.delete("uuid")
-#'
-#' updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
-#' "uuid")
-#'
-#' createdCollection <- arv$collections.create(list(name = "Example",
-#' description = "This is a test collection"))
-#' }
-NULL
-
-#' @export
+#' @export Arvados
Arvados <- R6::R6Class(
- "Arvados",
-
- public = list(
-
- initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
- {
- if(!is.null(hostName))
- Sys.setenv(ARVADOS_API_HOST = hostName)
-
- if(!is.null(authToken))
- Sys.setenv(ARVADOS_API_TOKEN = authToken)
-
- hostName <- Sys.getenv("ARVADOS_API_HOST")
- token <- Sys.getenv("ARVADOS_API_TOKEN")
-
- if(hostName == "" | token == "")
- stop(paste("Please provide host name and authentification token",
- "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
- "environment variables."))
-
- private$token <- token
- private$host <- paste0("https://", hostName, "/arvados/v1/")
- private$numRetries <- numRetries
- private$REST <- RESTService$new(token, hostName,
- HttpRequest$new(), HttpParser$new(),
- numRetries)
-
- },
-
- projects.get = function(uuid)
- {
- self$groups.get(uuid)
- },
-
- projects.create = function(group, ensure_unique_name = "false")
- {
- group <- c("group_class" = "project", group)
- self$groups.create(group, ensure_unique_name)
- },
-
- projects.update = function(group, uuid)
- {
- group <- c("group_class" = "project", group)
- self$groups.update(group, uuid)
- },
-
- projects.list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- include_trash = NULL)
- {
- filters[[length(filters) + 1]] <- list("group_class", "=", "project")
- self$groups.list(filters, where, order, select, distinct,
- limit, offset, count, include_trash)
- },
-
- projects.delete = function(uuid)
- {
- self$groups.delete(uuid)
- },
-
- api_clients.get = function(uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_clients.create = function(apiclient,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("api_clients")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(apiclient) > 0)
- body <- jsonlite::toJSON(list(apiclient = apiclient),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_clients.update = function(apiclient, uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(apiclient) > 0)
- body <- jsonlite::toJSON(list(apiclient = apiclient),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_clients.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("api_clients/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_clients.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("api_clients")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.get = function(uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.create = function(apiclientauthorization,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(apiclientauthorization) > 0)
- body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.update = function(apiclientauthorization, uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(apiclientauthorization) > 0)
- body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.create_system_auth = function(api_client_id = NULL, scopes = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations/create_system_auth")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(api_client_id = api_client_id,
- scopes = scopes)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.current = function()
- {
- endPoint <- stringr::str_interp("api_client_authorizations/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- api_client_authorizations.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("api_client_authorizations")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- authorized_keys.get = function(uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- authorized_keys.create = function(authorizedkey,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("authorized_keys")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(authorizedkey) > 0)
- body <- jsonlite::toJSON(list(authorizedkey = authorizedkey),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- authorized_keys.update = function(authorizedkey, uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(authorizedkey) > 0)
- body <- jsonlite::toJSON(list(authorizedkey = authorizedkey),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- authorized_keys.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("authorized_keys/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- authorized_keys.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("authorized_keys")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.get = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.create = function(collection,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("collections")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(collection) > 0)
- body <- jsonlite::toJSON(list(collection = collection),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.update = function(collection, uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(collection) > 0)
- body <- jsonlite::toJSON(list(collection = collection),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.provenance = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/provenance")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.used_by = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/used_by")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.trash = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/trash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.untrash = function(uuid)
- {
- endPoint <- stringr::str_interp("collections/${uuid}/untrash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- collections.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL,
- include_trash = NULL, include_old_versions = NULL)
- {
- endPoint <- stringr::str_interp("collections")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation,
- include_trash = include_trash, include_old_versions = include_old_versions)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.get = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.create = function(container, ensure_unique_name = "false",
- cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("containers")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(container) > 0)
- body <- jsonlite::toJSON(list(container = container),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.update = function(container, uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(container) > 0)
- body <- jsonlite::toJSON(list(container = container),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.auth = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/auth")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.lock = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/lock")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.unlock = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/unlock")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.secret_mounts = function(uuid)
- {
- endPoint <- stringr::str_interp("containers/${uuid}/secret_mounts")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.current = function()
- {
- endPoint <- stringr::str_interp("containers/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- containers.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("containers")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- container_requests.get = function(uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- container_requests.create = function(containerrequest,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("container_requests")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(containerrequest) > 0)
- body <- jsonlite::toJSON(list(containerrequest = containerrequest),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- container_requests.update = function(containerrequest, uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(containerrequest) > 0)
- body <- jsonlite::toJSON(list(containerrequest = containerrequest),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- container_requests.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("container_requests/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- container_requests.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL,
- include_trash = NULL)
- {
- endPoint <- stringr::str_interp("container_requests")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation,
- include_trash = include_trash)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.get = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.create = function(group, ensure_unique_name = "false",
- cluster_id = NULL, async = "false")
- {
- endPoint <- stringr::str_interp("groups")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id, async = async)
-
- if(length(group) > 0)
- body <- jsonlite::toJSON(list(group = group),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.update = function(group, uuid, async = "false")
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(async = async)
-
- if(length(group) > 0)
- body <- jsonlite::toJSON(list(group = group),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.contents = function(filters = NULL,
- where = NULL, order = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- cluster_id = NULL, bypass_federation = NULL,
- include_trash = NULL, uuid = NULL, recursive = NULL,
- include = NULL)
- {
- endPoint <- stringr::str_interp("groups/contents")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, distinct = distinct, limit = limit,
- offset = offset, count = count, cluster_id = cluster_id,
- bypass_federation = bypass_federation, include_trash = include_trash,
- uuid = uuid, recursive = recursive, include = include)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.shared = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL,
- include_trash = NULL, include = NULL)
- {
- endPoint <- stringr::str_interp("groups/shared")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation,
- include_trash = include_trash, include = include)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.trash = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}/trash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.untrash = function(uuid)
- {
- endPoint <- stringr::str_interp("groups/${uuid}/untrash")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- groups.list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- cluster_id = NULL, bypass_federation = NULL,
- include_trash = NULL)
- {
- endPoint <- stringr::str_interp("groups")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation,
- include_trash = include_trash)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.get = function(uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.create = function(keepservice,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("keep_services")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(keepservice) > 0)
- body <- jsonlite::toJSON(list(keepservice = keepservice),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.update = function(keepservice, uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(keepservice) > 0)
- body <- jsonlite::toJSON(list(keepservice = keepservice),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("keep_services/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.accessible = function()
- {
- endPoint <- stringr::str_interp("keep_services/accessible")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- keep_services.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("keep_services")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.get = function(uuid)
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.create = function(link, ensure_unique_name = "false",
- cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("links")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(link) > 0)
- body <- jsonlite::toJSON(list(link = link),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.update = function(link, uuid)
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(link) > 0)
- body <- jsonlite::toJSON(list(link = link),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("links/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("links")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- links.get_permissions = function(uuid)
- {
- endPoint <- stringr::str_interp("permissions/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- logs.get = function(uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- logs.create = function(log, ensure_unique_name = "false",
- cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("logs")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(log) > 0)
- body <- jsonlite::toJSON(list(log = log),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- logs.update = function(log, uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(log) > 0)
- body <- jsonlite::toJSON(list(log = log),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- logs.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("logs/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- logs.list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("logs")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.get = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.create = function(user, ensure_unique_name = "false",
- cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("users")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(user) > 0)
- body <- jsonlite::toJSON(list(user = user),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.update = function(user, uuid, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(bypass_federation = bypass_federation)
-
- if(length(user) > 0)
- body <- jsonlite::toJSON(list(user = user),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.current = function()
- {
- endPoint <- stringr::str_interp("users/current")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.system = function()
- {
- endPoint <- stringr::str_interp("users/system")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.activate = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}/activate")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.setup = function(uuid = NULL, user = NULL,
- repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
- {
- endPoint <- stringr::str_interp("users/setup")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(uuid = uuid, user = user,
- repo_name = repo_name, vm_uuid = vm_uuid,
- send_notification_email = send_notification_email)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.unsetup = function(uuid)
- {
- endPoint <- stringr::str_interp("users/${uuid}/unsetup")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.merge = function(new_owner_uuid, new_user_token = NULL,
- redirect_to_new_user = NULL, old_user_uuid = NULL,
- new_user_uuid = NULL)
- {
- endPoint <- stringr::str_interp("users/merge")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(new_owner_uuid = new_owner_uuid,
- new_user_token = new_user_token, redirect_to_new_user = redirect_to_new_user,
- old_user_uuid = old_user_uuid, new_user_uuid = new_user_uuid)
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- users.list = function(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("users")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.get = function(uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.create = function(repository,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("repositories")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(repository) > 0)
- body <- jsonlite::toJSON(list(repository = repository),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.update = function(repository, uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(repository) > 0)
- body <- jsonlite::toJSON(list(repository = repository),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("repositories/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.get_all_permissions = function()
- {
- endPoint <- stringr::str_interp("repositories/get_all_permissions")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- repositories.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("repositories")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.get = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.create = function(virtualmachine,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("virtual_machines")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(virtualmachine) > 0)
- body <- jsonlite::toJSON(list(virtualmachine = virtualmachine),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.update = function(virtualmachine, uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(virtualmachine) > 0)
- body <- jsonlite::toJSON(list(virtualmachine = virtualmachine),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.logins = function(uuid)
- {
- endPoint <- stringr::str_interp("virtual_machines/${uuid}/logins")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.get_all_logins = function()
- {
- endPoint <- stringr::str_interp("virtual_machines/get_all_logins")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- virtual_machines.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("virtual_machines")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- workflows.get = function(uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- workflows.create = function(workflow, ensure_unique_name = "false",
- cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("workflows")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(workflow) > 0)
- body <- jsonlite::toJSON(list(workflow = workflow),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- workflows.update = function(workflow, uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(workflow) > 0)
- body <- jsonlite::toJSON(list(workflow = workflow),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- workflows.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("workflows/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- workflows.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("workflows")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.get = function(uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.create = function(useragreement,
- ensure_unique_name = "false", cluster_id = NULL)
- {
- endPoint <- stringr::str_interp("user_agreements")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(ensure_unique_name = ensure_unique_name,
- cluster_id = cluster_id)
-
- if(length(useragreement) > 0)
- body <- jsonlite::toJSON(list(useragreement = useragreement),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.update = function(useragreement, uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- if(length(useragreement) > 0)
- body <- jsonlite::toJSON(list(useragreement = useragreement),
- auto_unbox = TRUE)
- else
- body <- NULL
-
- response <- private$REST$http$exec("PUT", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.delete = function(uuid)
- {
- endPoint <- stringr::str_interp("user_agreements/${uuid}")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("DELETE", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.signatures = function()
- {
- endPoint <- stringr::str_interp("user_agreements/signatures")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.sign = function()
- {
- endPoint <- stringr::str_interp("user_agreements/sign")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("POST", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.list = function(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", cluster_id = NULL, bypass_federation = NULL)
- {
- endPoint <- stringr::str_interp("user_agreements")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- list(filters = filters, where = where,
- order = order, select = select, distinct = distinct,
- limit = limit, offset = offset, count = count,
- cluster_id = cluster_id, bypass_federation = bypass_federation)
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- user_agreements.new = function()
- {
- endPoint <- stringr::str_interp("user_agreements/new")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- configs.get = function()
- {
- endPoint <- stringr::str_interp("config")
- url <- paste0(private$host, endPoint)
- headers <- list(Authorization = paste("Bearer", private$token),
- "Content-Type" = "application/json")
- queryArgs <- NULL
-
- body <- NULL
-
- response <- private$REST$http$exec("GET", url, headers, body,
- queryArgs, private$numRetries)
- resource <- private$REST$httpParser$parseJSONResponse(response)
-
- if(!is.null(resource$errors))
- stop(resource$errors)
-
- resource
- },
-
- getHostName = function() private$host,
- getToken = function() private$token,
- setRESTService = function(newREST) private$REST <- newREST,
- getRESTService = function() private$REST
- ),
-
- private = list(
-
- token = NULL,
- host = NULL,
- REST = NULL,
- numRetries = NULL
- ),
-
- cloneable = FALSE
+ "Arvados",
+
+ public = list(
+
+ #' @description
+ #' Initialize new environment.
+ #' @param authToken ARVADOS_API_TOKEN from 'Get API Token' on Arvados.
+ #' @param hostName ARVADOS_API_HOST from 'Get API Token' on Arvados.
+ #' @param numRetries Specify number of times to retry failed service requests.
+ #' @return A new `Arvados` object.
+ #' @examples
+ #' arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
+ initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
+ {
+ if(!is.null(hostName))
+ Sys.setenv(ARVADOS_API_HOST = hostName)
+
+ if(!is.null(authToken))
+ Sys.setenv(ARVADOS_API_TOKEN = authToken)
+
+ hostName <- Sys.getenv("ARVADOS_API_HOST")
+ token <- Sys.getenv("ARVADOS_API_TOKEN")
+
+ if(hostName == "" | token == "")
+ stop(paste("Please provide host name and authentification token",
+ "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
+ "environment variables."))
+
+ private$token <- token
+ private$host <- paste0("https://", hostName, "/arvados/v1/")
+ private$numRetries <- numRetries
+ private$REST <- RESTService$new(token, hostName,
+ HttpRequest$new(), HttpParser$new(),
+ numRetries)
+
+ },
+
+ #' @description
+ #' project_exist checks whether a project with the given UUID exists.
+ #' @param uuid The UUID of a project or a file.
+ #' @examples
+ #' \dontrun{
+ #' arv$project_exist(uuid = "projectUUID")
+ #' }
+ project_exist = function(uuid)
+ {
+ proj <- self$project_list(list(list("uuid", '=', uuid)))
+ value <- length(proj$items)
+
+ if (value == 1){
+ cat(format('TRUE'))
+ } else {
+ cat(format('FALSE'))
+ }
+ },
+
+ #' @description
+ #' project_get returns the demanded project.
+ #' @param uuid The UUID of the Group in question.
+ #' @examples
+ #' \dontrun{
+ #' project <- arv$project_get(uuid = 'projectUUID')
+ #' }
+ project_get = function(uuid)
+ {
+ self$groups_get(uuid)
+ },
+
+ #' @description
+ #' project_create creates a new project of a given name and description.
+ #' @param name Name of the project.
+ #' @param description Description of the project.
+ #' @param ownerUUID The UUID of the parent project in which the new project will be created.
+ #' @param properties List of the properties of the project.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @examples
+ #' \dontrun{
+ #' Properties <- list() # should contain a list of new properties to be added
+ #' new_project <- arv$project_create(name = "project name", description = "project description", ownerUUID = "project UUID", properties = NULL, ensureUniqueName = "false")
+ #' }
+ project_create = function(name, description, ownerUUID, properties = NULL, ensureUniqueName = "false")
+ {
+ group <- list(name = name, description = description, owner_uuid = ownerUUID, properties = properties)
+ group <- c("group_class" = "project", group)
+ self$groups_create(group, ensureUniqueName = ensureUniqueName)
+ },
+
+ #' @description
+ #' project_properties_set is a method defined in Arvados class that sets or overwrites project properties. Properties that are already set are overwritten.
+ #' @param listProperties List of new properties.
+ #' @param uuid The UUID of a project or a file.
+ #' @examples
+ #' \dontrun{
+ #' Properties <- list() # should contain a list of new properties to be added
+ #' arv$project_properties_set(Properties, uuid)
+ #' }
+ project_properties_set = function(listProperties, uuid)
+ {
+ group <- c("group_class" = "project", list("properties" = listProperties))
+ self$groups_update(group, uuid)
+
+ },
+
+ #' @description
+ #' project_properties_append is a method defined in Arvados class that appends new properties to a project without removing the existing ones.
+ #' @param properties List of new properties.
+ #' @param uuid The UUID of a project or a file.
+ #' @examples
+ #' \dontrun{
+ #' newProperties <- list() # should contain a list of new properties to be added
+ #' arv$project_properties_append(properties = newProperties, uuid)
+ #' }
+ project_properties_append = function(properties, uuid)
+ {
+ proj <- self$project_list(list(list('uuid', '=', uuid)))
+ projProp <- proj$items[[1]]$properties
+
+ newListOfProperties <- c(projProp, properties)
+ uniqueProperties <- unique(unlist(newListOfProperties))
+ newListOfProperties <- suppressWarnings(newListOfProperties[which(newListOfProperties == uniqueProperties)])
+
+ group <- c("group_class" = "project", list("properties" = newListOfProperties))
+ self$groups_update(group, uuid);
+
+ },
+
+ #' @description
+ #' project_properties_get is a method defined in Arvados class that returns properties.
+ #' @param uuid The UUID of a project or a file.
+ #' @examples
+ #' \dontrun{
+ #' arv$project_properties_get(projectUUID)
+ #' }
+ project_properties_get = function(uuid)
+ {
+ proj <- self$project_list(list(list('uuid', '=', uuid)))
+ proj$items[[1]]$properties
+ },
+
+ #' @description
+ #' project_properties_delete is a method defined in Arvados class that deletes the given property from a project.
+ #' @param oneProp Property to be deleted.
+ #' @param uuid The UUID of a project or a file.
+ #' @examples
+ #' \dontrun{
+ #' Properties <- list() # should contain the property to be deleted
+ #' arv$project_properties_delete(Properties, projectUUID)
+ #' }
+ project_properties_delete = function(oneProp, uuid)
+ {
+ proj <- self$project_list(list(list('uuid', '=', uuid))) # find project
+ projProp <- proj$items[[1]]$properties
+ for (i in 1:length(projProp)){
+ solution <- identical(projProp[i],oneProp)
+ if (solution == TRUE) {
+ projProp <- projProp[names(projProp) != names(oneProp)]
+ self$project_properties_set(projProp, uuid)
+ }
+ }
+ },
+
+ #' @description
+ #' project_update enables updating a project. A new name, description and properties may be given.
+ #' @param ... Feature to be updated (name, description, properties).
+ #' @param uuid The UUID of a project in question.
+ #' @examples
+ #' \dontrun{
+ #' newProperties <- list() # should contain a list of new properties to be added
+ #' arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
+ #' }
+ project_update = function(..., uuid) {
+ vec <- list(...)
+ for (i in 1:length(vec))
+ {
+ if (names(vec[i]) == 'properties') {
+ solution <- self$project_properties_append(vec$properties, uuid = uuid)
+ }
+ }
+ vecNew <- vec[names(vec) != "properties"]
+ vecNew <- c("group_class" = "project", vecNew)
+ z <- self$groups_update(vecNew, uuid)
+ },
+
+ #' @description
+ #' project_list enables listing project by its name, uuid, properties, permissions.
+ #' @param filters
+ #' @param where
+ #' @param order
+ #' @param distinct
+ #' @param limit
+ #' @param offset
+ #' @param count
+ #' @param includeTrash Include items whose is_trashed attribute is true.
+ #' @param uuid The UUID of a project in question.
+ #' @param recursive Include contents from child groups recursively.
+ #' @examples
+ #' \dontrun{
+ #' listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # Sample query which show projects within the project of a given UUID
+ #' }
+ project_list = function(filters = NULL, where = NULL,
+ order = NULL, select = NULL, distinct = NULL,
+ limit = "100", offset = "0", count = "exact",
+ includeTrash = NULL)
+ {
+ filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+ self$groups_list(filters, where, order, select, distinct,
+ limit, offset, count, includeTrash)
+ },
+
+ #' @description
+ #' project_delete trashes project of a given uuid. It can be restored from trash or deleted permanently.
+ #' @param uuid The UUID of the Group in question.
+ #' @examples
+ #' \dontrun{
+ #' arv$project_delete(uuid = 'projectUUID')
+ #' }
+ project_delete = function(uuid)
+ {
+ self$groups_delete(uuid)
+ },
+
+ #' @description
+ #' api_clients_get is a method defined in Arvados class.
+ #' @param uuid The UUID of the apiClient in question.
+ api_clients_get = function(uuid)
+ {
+ endPoint <- stringr::str_interp("api_clients/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ body <- NULL
+
+ response <- private$REST$http$exec("GET", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_clients_create is a method defined in Arvados class.
+ #' @param apiClient apiClient object.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ api_clients_create = function(apiClient,
+ ensureUniqueName = "false", clusterID = NULL)
+ {
+ endPoint <- stringr::str_interp("api_clients")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- list(ensureUniqueName = ensureUniqueName,
+ clusterID = clusterID)
+
+ if(length(apiClient) > 0)
+ body <- jsonlite::toJSON(list(apiClient = apiClient),
+ auto_unbox = TRUE)
+ else
+ body <- NULL
+
+ response <- private$REST$http$exec("POST", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_clients_update is a method defined in Arvados class.
+ #' @param apiClient apiClient object.
+ #' @param uuid The UUID of the apiClient in question.
+ api_clients_update = function(apiClient, uuid)
+ {
+ endPoint <- stringr::str_interp("api_clients/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ if(length(apiClient) > 0)
+ body <- jsonlite::toJSON(list(apiClient = apiClient),
+ auto_unbox = TRUE)
+ else
+ body <- NULL
+
+ response <- private$REST$http$exec("PUT", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_clients_delete is a method defined in Arvados class.
+ #' @param uuid The UUID of the apiClient in question.
+ api_clients_delete = function(uuid)
+ {
+ endPoint <- stringr::str_interp("api_clients/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ body <- NULL
+
+ response <- private$REST$http$exec("DELETE", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_clients_list is a method defined in Arvados class.
+ #' @param filters
+ #' @param where
+ #' @param order
+ #' @param select
+ #' @param distinct
+ #' @param limit
+ #' @param offset
+ #' @param count
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ api_clients_list = function(filters = NULL,
+ where = NULL, order = NULL, select = NULL,
+ distinct = NULL, limit = "100", offset = "0",
+ count = "exact", clusterID = NULL, bypassFederation = NULL)
+ {
+ endPoint <- stringr::str_interp("api_clients")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- list(filters = filters, where = where,
+ order = order, select = select, distinct = distinct,
+ limit = limit, offset = offset, count = count,
+ clusterID = clusterID, bypassFederation = bypassFederation)
+
+ body <- NULL
+
+ response <- private$REST$http$exec("GET", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_client_authorizations_get is a method defined in Arvados class.
+ #' @param uuid The UUID of the apiClientAuthorization in question.
+ api_client_authorizations_get = function(uuid)
+ {
+ endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ body <- NULL
+
+ response <- private$REST$http$exec("GET", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_client_authorizations_create is a method defined in Arvados class.
+ #' @param apiClientAuthorization apiClientAuthorization object.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error on (ownerUUID, name) collision.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ api_client_authorizations_create = function(apiClientAuthorization,
+ ensureUniqueName = "false", clusterID = NULL)
+ {
+ endPoint <- stringr::str_interp("api_client_authorizations")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- list(ensureUniqueName = ensureUniqueName,
+ clusterID = clusterID)
+
+ if(length(apiClientAuthorization) > 0)
+ body <- jsonlite::toJSON(list(apiClientAuthorization = apiClientAuthorization),
+ auto_unbox = TRUE)
+ else
+ body <- NULL
+
+ response <- private$REST$http$exec("POST", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_client_authorizations_update is a method defined in Arvados class.
+ #' @param apiClientAuthorization apiClientAuthorization object.
+ #' @param uuid The UUID of the apiClientAuthorization in question.
+ api_client_authorizations_update = function(apiClientAuthorization, uuid)
+ {
+ endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ if(length(apiClientAuthorization) > 0)
+ body <- jsonlite::toJSON(list(apiClientAuthorization = apiClientAuthorization),
+ auto_unbox = TRUE)
+ else
+ body <- NULL
+
+ response <- private$REST$http$exec("PUT", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_client_authorizations_delete is a method defined in Arvados class.
+ #' @param uuid The UUID of the apiClientAuthorization in question.
+ api_client_authorizations_delete = function(uuid)
+ {
+ endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+ url <- paste0(private$host, endPoint)
+ headers <- list(Authorization = paste("Bearer", private$token),
+ "Content-Type" = "application/json")
+ queryArgs <- NULL
+
+ body <- NULL
+
+ response <- private$REST$http$exec("DELETE", url, headers, body,
+ queryArgs, private$numRetries)
+ resource <- private$REST$httpParser$parseJSONResponse(response)
+
+ if(!is.null(resource$errors))
+ stop(resource$errors)
+
+ resource
+ },
+
+ #' @description
+ #' api_client_authorizations_create_system_auth is a method defined in Arvados class.
+ #' Issues POST api_client_authorizations/create_system_auth; all parameters travel as query args.
+ #' @param apiClientID Client ID to associate with the new token; sent as a query argument.
+ #' @param scopes Scopes to grant to the new token; sent as a query argument.
+ api_client_authorizations_create_system_auth = function(apiClientID = NULL, scopes = NULL)
+ {
+     endPoint <- stringr::str_interp("api_client_authorizations/create_system_auth")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(apiClientID = apiClientID,
+                       scopes = scopes)
+
+     # No request body for this endpoint.
+     body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload rather than via HTTP errors.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' api_client_authorizations_current is a method defined in Arvados class.
+ #' Fetches the api_client_authorization record backing the token this client is using.
+ api_client_authorizations_current = function()
+ {
+     endPoint <- stringr::str_interp("api_client_authorizations/current")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' api_client_authorizations_list is a method defined in Arvados class.
+ #' @param filters Filter conditions, e.g. list(list("attr", "=", "value")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param select Attributes to include in the response.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100"; note defaults are passed as strings).
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ api_client_authorizations_list = function(filters = NULL,
+     where = NULL, order = NULL, select = NULL,
+     distinct = NULL, limit = "100", offset = "0",
+     count = "exact", clusterID = NULL, bypassFederation = NULL)
+ {
+     endPoint <- stringr::str_interp("api_client_authorizations")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, select = select, distinct = distinct,
+                       limit = limit, offset = offset, count = count,
+                       clusterID = clusterID, bypassFederation = bypassFederation)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' authorized_keys_get is a method defined in Arvados class.
+ #' Fetches a single authorized_key record by UUID (GET authorized_keys/${uuid}).
+ #' @param uuid The UUID of the authorizedKey in question.
+ authorized_keys_get = function(uuid)
+ {
+     endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' authorized_keys_create is a method defined in Arvados class.
+ #' @param authorizedKey authorizedKey object (a named list of attributes), serialized as the JSON request body.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ authorized_keys_create = function(authorizedKey,
+     ensureUniqueName = "false", clusterID = NULL)
+ {
+     endPoint <- stringr::str_interp("authorized_keys")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                       clusterID = clusterID)
+
+     # Wrap the attributes under the resource name, as the API expects.
+     if(length(authorizedKey) > 0)
+         body <- jsonlite::toJSON(list(authorizedKey = authorizedKey),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' authorized_keys_update is a method defined in Arvados class.
+ #' @param authorizedKey authorizedKey object (a named list of changed attributes), serialized as the JSON request body.
+ #' @param uuid The UUID of the authorizedKey in question.
+ authorized_keys_update = function(authorizedKey, uuid)
+ {
+     endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     if(length(authorizedKey) > 0)
+         body <- jsonlite::toJSON(list(authorizedKey = authorizedKey),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("PUT", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' authorized_keys_delete is a method defined in Arvados class.
+ #' Deletes the authorized_key record (DELETE authorized_keys/${uuid}).
+ #' @param uuid The UUID of the authorizedKey in question.
+ authorized_keys_delete = function(uuid)
+ {
+     endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("DELETE", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' authorized_keys_list is a method defined in Arvados class.
+ #' @param filters Filter conditions, e.g. list(list("attr", "=", "value")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param select Attributes to include in the response.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100").
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ authorized_keys_list = function(filters = NULL,
+     where = NULL, order = NULL, select = NULL,
+     distinct = NULL, limit = "100", offset = "0",
+     count = "exact", clusterID = NULL, bypassFederation = NULL)
+ {
+     endPoint <- stringr::str_interp("authorized_keys")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, select = select, distinct = distinct,
+                       limit = limit, offset = offset, count = count,
+                       clusterID = clusterID, bypassFederation = bypassFederation)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_get is a method defined in Arvados class.
+ #' Fetches a single collection record by UUID (GET collections/${uuid}).
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' collection <- arv$collections_get(uuid = collectionUUID)
+ #' }
+ collections_get = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_create is a method defined in Arvados class that enables collections creation.
+ #' Sends POST collections with a JSON body. On a 422 name collision it prints a
+ #' hint and returns NULL instead of stopping; any other API error stops.
+ #' @param name Name of the collection.
+ #' @param description Description of the collection.
+ #' @param ownerUUID UUID of the parent project that will own the new collection.
+ #' @param properties Properties of the collection.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ #' @examples
+ #' \dontrun{
+ #' Properties <- list() # should contain a list of new properties to be added
+ #' arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
+ #' }
+ collections_create = function(name, description, ownerUUID = NULL, properties = NULL, # name and description are obligatory
+     ensureUniqueName = "false", clusterID = NULL)
+ {
+     endPoint <- stringr::str_interp("collections")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                       clusterID = clusterID)
+
+     # The API expects snake_case attribute names (owner_uuid).
+     collection <- list(name = name, description = description,
+                        owner_uuid = ownerUUID, properties = properties)
+     if(length(collection) > 0)
+         body <- jsonlite::toJSON(list(collection = collection),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors)){
+         # A 422 from railsapi here means the name is already taken in the
+         # target project; report that case without raising an error.
+         if(identical(sub('Entity:.*',"", resource$errors), "//railsapi.internal/arvados/v1/collections: 422 Unprocessable ")){
+             # NOTE: cat() returns NULL; make the NULL result explicit instead
+             # of assigning cat()'s return value.
+             cat("A collection with the given name already exists in this project. If you want to update it use collections_update() instead\n")
+             resource <- NULL
+         }else{
+             stop(resource$errors)
+         }
+     }
+
+     resource
+ },
+
+ #' @description
+ #' collections_update is a method defined in Arvados class.
+ #' Sends PUT collections/${uuid} with the changed attributes as a JSON body.
+ #' @param name New name of the collection.
+ #' @param description New description of the collection.
+ #' @param ownerUUID UUID of the parent project that should own the collection.
+ #' @param properties New list of properties of the collection.
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
+ #' }
+ collections_update = function(name, description, ownerUUID = NULL, properties = NULL, uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     # BUG FIX: the attribute must be named owner_uuid (snake_case), matching
+     # collections_create; the previous "ownerUUID" key was not recognized by
+     # the API, so ownership changes were silently dropped.
+     collection <- list(name = name, description = description,
+                        owner_uuid = ownerUUID, properties = properties)
+     if(length(collection) > 0)
+         body <- jsonlite::toJSON(list(collection = collection),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("PUT", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_delete is a method defined in Arvados class.
+ #' Deletes (trashes) the collection (DELETE collections/${uuid}).
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' arv$collections_delete(collectionUUID)
+ #' }
+ collections_delete = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("DELETE", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_provenance is a method defined in Arvados class; it queries
+ #' GET collections/${uuid}/provenance for the given collection.
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' collection <- arv$collections_provenance(collectionUUID)
+ #' }
+ collections_provenance = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}/provenance")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_used_by is a method defined in Arvados class; it queries
+ #' GET collections/${uuid}/used_by for the given collection.
+ #' @param uuid The UUID of the Collection in question.
+ collections_used_by = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}/used_by")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_trash is a method defined in Arvados class, it moves collection to trash.
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' arv$collections_trash(collectionUUID)
+ #' }
+ collections_trash = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}/trash")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     # Trash/untrash are POST actions on the collection resource.
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_untrash is a method defined in Arvados class, it moves collection from trash to project.
+ #' @param uuid The UUID of the Collection in question.
+ #' @examples
+ #' \dontrun{
+ #' arv$collections_untrash(collectionUUID)
+ #' }
+ collections_untrash = function(uuid)
+ {
+     endPoint <- stringr::str_interp("collections/${uuid}/untrash")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' collections_list is a method defined in Arvados class.
+ #' @param filters Filter conditions, e.g. list(list("name", "=", "Example")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param select Attributes to include in the response.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100").
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ #' @param includeTrash Include collections whose is_trashed attribute is true.
+ #' @param includeOldVersions Include past collection versions.
+ #' @examples
+ #' \dontrun{
+ #' collectionList <- arv$collections_list(list(list("name", "=", "Example")))
+ #' }
+ collections_list = function(filters = NULL,
+     where = NULL, order = NULL, select = NULL,
+     distinct = NULL, limit = "100", offset = "0",
+     count = "exact", clusterID = NULL, bypassFederation = NULL,
+     includeTrash = NULL, includeOldVersions = NULL)
+ {
+     endPoint <- stringr::str_interp("collections")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, select = select, distinct = distinct,
+                       limit = limit, offset = offset, count = count,
+                       clusterID = clusterID, bypassFederation = bypassFederation,
+                       includeTrash = includeTrash, includeOldVersions = includeOldVersions)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_get is a method defined in Arvados class.
+ #' Fetches a single container record by UUID (GET containers/${uuid}).
+ #' @param uuid The UUID of the Container in question.
+ containers_get = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_create is a method defined in Arvados class.
+ #' @param container Container object (a named list of attributes), serialized as the JSON request body.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ containers_create = function(container, ensureUniqueName = "false",
+     clusterID = NULL)
+ {
+     endPoint <- stringr::str_interp("containers")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                       clusterID = clusterID)
+
+     # Wrap the attributes under the resource name, as the API expects.
+     if(length(container) > 0)
+         body <- jsonlite::toJSON(list(container = container),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_update is a method defined in Arvados class.
+ #' @param container Container object (a named list of changed attributes), serialized as the JSON request body.
+ #' @param uuid The UUID of the Container in question.
+ containers_update = function(container, uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     if(length(container) > 0)
+         body <- jsonlite::toJSON(list(container = container),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("PUT", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_delete is a method defined in Arvados class.
+ #' Deletes the container record (DELETE containers/${uuid}).
+ #' @param uuid The UUID of the Container in question.
+ containers_delete = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("DELETE", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_auth is a method defined in Arvados class.
+ #' Queries GET containers/${uuid}/auth for the given container.
+ #' @param uuid The UUID of the Container in question.
+ containers_auth = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}/auth")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_lock is a method defined in Arvados class.
+ #' Issues POST containers/${uuid}/lock.
+ #' @param uuid The UUID of the Container in question.
+ containers_lock = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}/lock")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_unlock is a method defined in Arvados class.
+ #' Issues POST containers/${uuid}/unlock.
+ #' @param uuid The UUID of the Container in question.
+ containers_unlock = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}/unlock")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_secret_mounts is a method defined in Arvados class.
+ #' Queries GET containers/${uuid}/secret_mounts for the given container.
+ #' @param uuid The UUID of the Container in question.
+ containers_secret_mounts = function(uuid)
+ {
+     endPoint <- stringr::str_interp("containers/${uuid}/secret_mounts")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_current is a method defined in Arvados class.
+ #' Queries GET containers/current.
+ containers_current = function()
+ {
+     endPoint <- stringr::str_interp("containers/current")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' containers_list is a method defined in Arvados class.
+ #' @param filters Filter conditions, e.g. list(list("attr", "=", "value")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param select Attributes to include in the response.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100").
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ containers_list = function(filters = NULL,
+     where = NULL, order = NULL, select = NULL,
+     distinct = NULL, limit = "100", offset = "0",
+     count = "exact", clusterID = NULL, bypassFederation = NULL)
+ {
+     endPoint <- stringr::str_interp("containers")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, select = select, distinct = distinct,
+                       limit = limit, offset = offset, count = count,
+                       clusterID = clusterID, bypassFederation = bypassFederation)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' container_requests_get is a method defined in Arvados class.
+ #' Fetches a single container_request record by UUID (GET container_requests/${uuid}).
+ #' @param uuid The UUID of the containerRequest in question.
+ container_requests_get = function(uuid)
+ {
+     endPoint <- stringr::str_interp("container_requests/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' container_requests_create is a method defined in Arvados class.
+ #' @param containerRequest containerRequest object (a named list of attributes), serialized as the JSON request body.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ container_requests_create = function(containerRequest,
+     ensureUniqueName = "false", clusterID = NULL)
+ {
+     endPoint <- stringr::str_interp("container_requests")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                       clusterID = clusterID)
+
+     # Wrap the attributes under the resource name, as the API expects.
+     if(length(containerRequest) > 0)
+         body <- jsonlite::toJSON(list(containerRequest = containerRequest),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' container_requests_update is a method defined in Arvados class.
+ #' @param containerRequest containerRequest object (a named list of changed attributes), serialized as the JSON request body.
+ #' @param uuid The UUID of the containerRequest in question.
+ container_requests_update = function(containerRequest, uuid)
+ {
+     endPoint <- stringr::str_interp("container_requests/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     if(length(containerRequest) > 0)
+         body <- jsonlite::toJSON(list(containerRequest = containerRequest),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("PUT", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' container_requests_delete is a method defined in Arvados class.
+ #' Deletes the container_request record (DELETE container_requests/${uuid}).
+ #' @param uuid The UUID of the containerRequest in question.
+ container_requests_delete = function(uuid)
+ {
+     endPoint <- stringr::str_interp("container_requests/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("DELETE", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' container_requests_list is a method defined in Arvados class.
+ #' @param filters Filter conditions, e.g. list(list("attr", "=", "value")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param select Attributes to include in the response.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100").
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ #' @param includeTrash Include container requests whose owner project is trashed.
+ container_requests_list = function(filters = NULL,
+     where = NULL, order = NULL, select = NULL,
+     distinct = NULL, limit = "100", offset = "0",
+     count = "exact", clusterID = NULL, bypassFederation = NULL,
+     includeTrash = NULL)
+ {
+     endPoint <- stringr::str_interp("container_requests")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, select = select, distinct = distinct,
+                       limit = limit, offset = offset, count = count,
+                       clusterID = clusterID, bypassFederation = bypassFederation,
+                       includeTrash = includeTrash)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' groups_get is a method defined in Arvados class.
+ #' Fetches a single group record by UUID (GET groups/${uuid}).
+ #' @param uuid The UUID of the Group in question.
+ groups_get = function(uuid)
+ {
+     endPoint <- stringr::str_interp("groups/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+
+     queryArgs <- NULL
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     # The API reports failures inside the parsed payload.
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' groups_create is a method defined in Arvados class that supports project creation.
+ #' Sends POST groups with a JSON body. On a 422 name collision it prints a hint
+ #' and returns NULL instead of stopping; any other API error stops.
+ #' @param group Group object (a named list of attributes), serialized as the JSON request body.
+ #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+ #' @param clusterID Create object on a remote federated cluster instead of the current one.
+ #' @param async Defer permissions update.
+ groups_create = function(group, ensureUniqueName = "false",
+     clusterID = NULL, async = "false")
+ {
+     endPoint <- stringr::str_interp("groups")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+
+     queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                       clusterID = clusterID, async = async)
+
+     if(length(group) > 0)
+         body <- jsonlite::toJSON(list(group = group),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("POST", url, headers, body,
+                                        queryArgs, private$numRetries)
+
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors)){
+         # A 422 from railsapi here means a group/project with that name
+         # already exists; report that case without raising an error.
+         if (identical(sub('#.*', "", resource$errors), "//railsapi.internal/arvados/v1/groups: 422 Unprocessable Entity: ")) {
+             # NOTE: cat() returns NULL; make the NULL result explicit instead
+             # of assigning cat()'s return value.
+             cat("A project with that name already exists. If you want to update it use project_update() instead\n")
+             resource <- NULL
+         }else{
+             stop(resource$errors)
+         }
+     }
+
+     return(resource)
+ },
+
+ #' @description
+ #' groups_update is a method defined in Arvados class.
+ #' Sends PUT groups/${uuid} with the changed attributes as a JSON body.
+ #' @param group Group object (a named list of changed attributes), serialized as the JSON request body.
+ #' @param uuid The UUID of the Group in question.
+ #' @param async Defer permissions update.
+ groups_update = function(group, uuid, async = "false")
+ {
+     endPoint <- stringr::str_interp("groups/${uuid}")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+
+     queryArgs <- list(async = async)
+
+     if(length(group) > 0)
+         body <- jsonlite::toJSON(list(group = group),
+                                  auto_unbox = TRUE)
+     else
+         body <- NULL
+
+     response <- private$REST$http$exec("PUT", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+ #' @description
+ #' groups_delete is a method defined in Arvados class.
+ #' Deletes the group (DELETE groups/${uuid}) and reports the scheduled
+ #' permanent-deletion date taken from the delete_at attribute.
+ #' @param uuid The UUID of the Group in question.
+ groups_delete = function(uuid)
+ {
+     url <- paste0(private$host, stringr::str_interp("groups/${uuid}"))
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+
+     # No query arguments and no request body for a delete.
+     response <- private$REST$http$exec("DELETE", url, headers, NULL,
+                                        NULL, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     # Keep only the date portion of the ISO timestamp for display.
+     dateOnly <- gsub("T.*", "", resource$delete_at)
+     cat("The content will be deleted permanently at", dateOnly)
+
+     resource
+ },
+
+ #' @description
+ #' groups_contents is a method defined in Arvados class.
+ #' Queries GET groups/contents to list the objects contained in a group/project.
+ #' @param filters Filter conditions, e.g. list(list("attr", "=", "value")); sent as a query argument.
+ #' @param where Legacy filter expression; sent as a query argument.
+ #' @param order Sort order for the returned items.
+ #' @param distinct Return only distinct rows.
+ #' @param limit Maximum number of items to return (default "100").
+ #' @param offset Number of items to skip (default "0").
+ #' @param count Count mode (default "exact").
+ #' @param clusterID List objects on a remote federated cluster instead of the current one.
+ #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+ #' @param includeTrash Include items whose is_trashed attribute is true.
+ #' @param uuid UUID of the group whose contents should be listed; sent as a query argument.
+ #' @param recursive Include contents from child groups recursively.
+ #' @param include Include objects referred to by listed field in "included" (only ownerUUID).
+ groups_contents = function(filters = NULL,
+     where = NULL, order = NULL, distinct = NULL,
+     limit = "100", offset = "0", count = "exact",
+     clusterID = NULL, bypassFederation = NULL,
+     includeTrash = NULL, uuid = NULL, recursive = NULL,
+     include = NULL)
+ {
+     endPoint <- stringr::str_interp("groups/contents")
+     url <- paste0(private$host, endPoint)
+     headers <- list(Authorization = paste("Bearer", private$token),
+                     "Content-Type" = "application/json")
+
+     # Note: unlike the other list methods, this endpoint takes no "select".
+     queryArgs <- list(filters = filters, where = where,
+                       order = order, distinct = distinct, limit = limit,
+                       offset = offset, count = count, clusterID = clusterID,
+                       bypassFederation = bypassFederation, includeTrash = includeTrash,
+                       uuid = uuid, recursive = recursive, include = include)
+
+     body <- NULL
+
+     response <- private$REST$http$exec("GET", url, headers, body,
+                                        queryArgs, private$numRetries)
+     resource <- private$REST$httpParser$parseJSONResponse(response)
+
+     if(!is.null(resource$errors))
+         stop(resource$errors)
+
+     resource
+ },
+
+    #' @description
+    #' groups_shared is a method defined in Arvados class. It sends an HTTP GET to groups/shared, listing groups shared with the current user.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    #' @param includeTrash Include items whose is_trashed attribute is true.
+    #' @param include Referenced objects to include in the "included" field of the response.
+    groups_shared = function(filters = NULL,
+        where = NULL, order = NULL, select = NULL,
+        distinct = NULL, limit = "100", offset = "0",
+        count = "exact", clusterID = NULL, bypassFederation = NULL,
+        includeTrash = NULL, include = NULL)
+    {
+        endPoint <- stringr::str_interp("groups/shared")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation,
+                          includeTrash = includeTrash, include = include)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' groups_trash is a method defined in Arvados class. It sends an HTTP POST to groups/<uuid>/trash, moving the group to the trash.
+    #' @param uuid The UUID of the Group in question.
+    groups_trash = function(uuid)
+    {
+        endPoint <- stringr::str_interp("groups/${uuid}/trash")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' groups_untrash is a method defined in Arvados class. It sends an HTTP POST to groups/<uuid>/untrash, restoring the group from the trash.
+    #' @param uuid The UUID of the Group in question.
+    groups_untrash = function(uuid)
+    {
+        endPoint <- stringr::str_interp("groups/${uuid}/untrash")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' groups_list is a method defined in Arvados class. It sends an HTTP GET to the groups endpoint and returns the parsed listing.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    #' @param includeTrash Include items whose is_trashed attribute is true.
+    groups_list = function(filters = NULL, where = NULL,
+        order = NULL, select = NULL, distinct = NULL,
+        limit = "100", offset = "0", count = "exact",
+        clusterID = NULL, bypassFederation = NULL,
+        includeTrash = NULL)
+    {
+        endPoint <- stringr::str_interp("groups")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation,
+                          includeTrash = includeTrash)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_get is a method defined in Arvados class. It retrieves a single KeepService record via HTTP GET keep_services/<uuid>.
+    #' @param uuid The UUID of the keepService in question.
+    keep_services_get = function(uuid)
+    {
+        endPoint <- stringr::str_interp("keep_services/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_create is a method defined in Arvados class. It creates a KeepService record via HTTP POST to keep_services.
+    #' @param keepService keepService object.
+    #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+    #' @param clusterID Create object on a remote federated cluster instead of the current one.
+    keep_services_create = function(keepService,
+        ensureUniqueName = "false", clusterID = NULL)
+    {
+        endPoint <- stringr::str_interp("keep_services")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                          clusterID = clusterID)
+
+        if(length(keepService) > 0)
+            body <- jsonlite::toJSON(list(keepService = keepService),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_update is a method defined in Arvados class. It updates a KeepService record via HTTP PUT to keep_services/<uuid>.
+    #' @param keepService keepService object.
+    #' @param uuid The UUID of the keepService in question.
+    keep_services_update = function(keepService, uuid)
+    {
+        endPoint <- stringr::str_interp("keep_services/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        if(length(keepService) > 0)
+            body <- jsonlite::toJSON(list(keepService = keepService),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("PUT", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_delete is a method defined in Arvados class. It deletes a KeepService record via HTTP DELETE keep_services/<uuid>.
+    #' @param uuid The UUID of the keepService in question.
+    keep_services_delete = function(uuid)
+    {
+        endPoint <- stringr::str_interp("keep_services/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("DELETE", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_accessible is a method defined in Arvados class. It sends an HTTP GET to keep_services/accessible, listing Keep services reachable by the current client.
+    keep_services_accessible = function()
+    {
+        endPoint <- stringr::str_interp("keep_services/accessible")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' keep_services_list is a method defined in Arvados class. It sends an HTTP GET to the keep_services endpoint and returns the parsed listing.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    keep_services_list = function(filters = NULL,
+        where = NULL, order = NULL, select = NULL,
+        distinct = NULL, limit = "100", offset = "0",
+        count = "exact", clusterID = NULL, bypassFederation = NULL)
+    {
+        endPoint <- stringr::str_interp("keep_services")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' project_permission_give is a method defined in Arvados class that enables sharing files with other users. It creates a permission link via HTTP POST to the links endpoint.
+    #' @param type Possible options are can_read or can_write or can_manage.
+    #' @param uuid The UUID of a project or a file.
+    #' @param user The UUID of the person that gets the permission.
+    #' @examples
+    #' \dontrun{
+    #' arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
+    #' }
+    project_permission_give = function(type, uuid, user)
+    {
+        endPoint <- stringr::str_interp("links")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        # Build the permission link record from the three arguments.
+        # (An alternative API could accept a ready-made link list instead.)
+        link <- list("link_class" = "permission", "name" = type, "head_uuid" = uuid, "tail_uuid" = user)
+
+        if(length(link) > 0)
+            body <- jsonlite::toJSON(list(link = link),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' project_permission_refuse is a method defined in Arvados class that disables sharing files with other users, by deleting the matching permission link.
+    #' @param type Possible options are can_read or can_write or can_manage.
+    #' @param uuid The UUID of a project or a file.
+    #' @param user The UUID of a person that permissions are taken from.
+    #' @examples
+    #' \dontrun{
+    #' arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
+    #' }
+    project_permission_refuse = function(type, uuid, user)
+    {
+        examples <- self$links_list(list(list("head_uuid","=", uuid)))
+
+        # NOTE(review): the first line subsets the top-level response list by
+        # indices computed over examples$items; presumably examples$items[...]
+        # was intended — confirm against the shape links_list returns.
+        theUser <- examples[which(sapply(examples$items, "[[", "tail_uuid") == user)]
+        theType <- theUser$items[which(sapply(theUser$items, "[[", "name") == type)]
+        solution <- theType[which(sapply(theType, "[[", "link_class") == 'permission')]
+
+        if (length(solution) == 0) {
+            cat(format('No permission granted'))
+        } else {
+            self$links_delete(solution[[1]]$uuid)
+        }
+
+    },
+
+    #' @description
+    #' project_permission_update is a method defined in Arvados class that enables updating permissions, by rewriting the name of the matching permission link.
+    #' @param typeNew New option like can_read or can_write or can_manage.
+    #' @param typeOld Old option.
+    #' @param uuid The UUID of a project or a file.
+    #' @param user The UUID of the person that the permission is being updated.
+    #' @examples
+    #' \dontrun{
+    #' arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
+    #' }
+    project_permission_update = function(typeOld, typeNew, uuid, user)
+    {
+        # Only the link's "name" attribute (the permission level) is changed.
+        link <- list("name" = typeNew)
+
+        examples <- self$links_list(list(list("head_uuid","=", uuid)))
+
+        # NOTE(review): same indexing pattern as project_permission_refuse —
+        # examples[which(...)] subsets the top-level list; confirm intended.
+        theUser <- examples[which(sapply(examples$items, "[[", "tail_uuid") == user)]
+        theType <- theUser$items[which(sapply(theUser$items, "[[", "name") == typeOld)]
+        solution <- theType[which(sapply(theType, "[[", "link_class") == 'permission')]
+
+        if (length(solution) == 0) {
+            cat(format('No permission granted'))
+        } else {
+            self$links_update(link, solution[[1]]$uuid)
+        }
+    },
+
+    #' @description
+    #' project_permission_check is a method defined in Arvados class that enables checking file permissions, by inspecting permission links on the object.
+    #' @param uuid The UUID of a project or a file.
+    #' @param user The UUID of the person whose permission is being checked.
+    #' @param type Possible options are can_read or can_write or can_manage.
+    #' @examples
+    #' \dontrun{
+    #' arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
+    #' }
+    project_permission_check = function(uuid, user, type = NULL)
+    {
+        examples <- self$links_list(list(list("head_uuid","=", uuid)))
+
+        # NOTE(review): same suspicious top-level subsetting as in
+        # project_permission_refuse; confirm against links_list's shape.
+        theUser <- examples[which(sapply(examples$items, "[[", "tail_uuid") == user)]
+
+        if (length(type) == 0 ){
+            theUser
+        } else {
+            theType <- theUser$items[which(sapply(theUser$items, "[[", "name") == type)]
+            # NOTE(review): errors if no matching link ("permisions" is also a
+            # misspelled local name); would need a length check like above.
+            permisions <- theType[which(sapply(theType, "[[", "link_class") == 'permission')]
+            print(permisions[[1]]$name)
+        }
+    },
+
+    #' @description
+    #' links_get is a method defined in Arvados class. It retrieves a single Link record via HTTP GET links/<uuid>.
+    #' @param uuid The UUID of the Link in question.
+    links_get = function(uuid)
+    {
+        endPoint <- stringr::str_interp("links/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' links_create is a method defined in Arvados class. It creates a Link record via HTTP POST to the links endpoint.
+    #' @param link Link object.
+    #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+    #' @param clusterID Create object on a remote federated cluster instead of the current one.
+    links_create = function(link, ensureUniqueName = "false",
+        clusterID = NULL)
+    {
+        endPoint <- stringr::str_interp("links")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                          clusterID = clusterID)
+
+        if(length(link) > 0)
+            body <- jsonlite::toJSON(list(link = link),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' links_update is a method defined in Arvados class. It updates a Link record via HTTP PUT to links/<uuid>.
+    #' @param link Link object.
+    #' @param uuid The UUID of the Link in question.
+    #' @param async Defer permissions update ("true"/"false" as a string; default "false").
+    links_update = function(link, uuid, async = "false")
+    {
+        endPoint <- stringr::str_interp("links/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(async = async)
+
+        if(length(link) > 0)
+            body <- jsonlite::toJSON(list(link = link),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("PUT", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' links_delete is a method defined in Arvados class. It deletes a Link record via HTTP DELETE links/<uuid>.
+    #' @param uuid The UUID of the Link in question.
+    links_delete = function(uuid)
+    {
+        endPoint <- stringr::str_interp("links/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("DELETE", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' links_list is a method defined in Arvados class. It sends an HTTP GET to the links endpoint and returns the parsed listing.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    links_list = function(filters = NULL, where = NULL,
+        order = NULL, select = NULL, distinct = NULL,
+        limit = "100", offset = "0", count = "exact",
+        clusterID = NULL, bypassFederation = NULL)
+    {
+        endPoint <- stringr::str_interp("links")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' links_get_permissions is a method defined in Arvados class. It sends an HTTP GET to permissions/<uuid>, listing permission links for the object.
+    #' @param uuid The UUID of the object whose permission links are listed (the previous doc said "Log", which looks like a copy/paste error).
+    links_get_permissions = function(uuid)
+    {
+        endPoint <- stringr::str_interp("permissions/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' logs_get is a method defined in Arvados class. It retrieves a single Log record via HTTP GET logs/<uuid>.
+    #' @param uuid The UUID of the Log in question.
+    logs_get = function(uuid)
+    {
+        endPoint <- stringr::str_interp("logs/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' logs_create is a method defined in Arvados class. It creates a Log record via HTTP POST to the logs endpoint.
+    #' @param log Log object.
+    #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+    #' @param clusterID Create object on a remote federated cluster instead of the current one.
+    logs_create = function(log, ensureUniqueName = "false",
+        clusterID = NULL)
+    {
+        endPoint <- stringr::str_interp("logs")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                          clusterID = clusterID)
+
+        if(length(log) > 0)
+            body <- jsonlite::toJSON(list(log = log),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' logs_update is a method defined in Arvados class. It updates a Log record via HTTP PUT to logs/<uuid>.
+    #' @param log Log object.
+    #' @param uuid The UUID of the Log in question.
+    logs_update = function(log, uuid)
+    {
+        endPoint <- stringr::str_interp("logs/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        if(length(log) > 0)
+            body <- jsonlite::toJSON(list(log = log),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("PUT", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' logs_delete is a method defined in Arvados class. It deletes a Log record via HTTP DELETE logs/<uuid>.
+    #' @param uuid The UUID of the Log in question.
+    logs_delete = function(uuid)
+    {
+        endPoint <- stringr::str_interp("logs/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("DELETE", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' logs_list is a method defined in Arvados class. It sends an HTTP GET to the logs endpoint and returns the parsed listing.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    logs_list = function(filters = NULL, where = NULL,
+        order = NULL, select = NULL, distinct = NULL,
+        limit = "100", offset = "0", count = "exact",
+        clusterID = NULL, bypassFederation = NULL)
+    {
+        endPoint <- stringr::str_interp("logs")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_get is a method defined in Arvados class. It retrieves a single User record via HTTP GET users/<uuid>.
+    #' @param uuid The UUID of the User in question.
+    users_get = function(uuid)
+    {
+        endPoint <- stringr::str_interp("users/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_create is a method defined in Arvados class. It creates a User record via HTTP POST to the users endpoint.
+    #' @param user User object.
+    #' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+    #' @param clusterID Create object on a remote federated cluster instead of the current one.
+    users_create = function(user, ensureUniqueName = "false",
+        clusterID = NULL)
+    {
+        endPoint <- stringr::str_interp("users")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(ensureUniqueName = ensureUniqueName,
+                          clusterID = clusterID)
+
+        if(length(user) > 0)
+            body <- jsonlite::toJSON(list(user = user),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_update is a method defined in Arvados class. It updates a User record via HTTP PUT to users/<uuid>.
+    #' @param user User object.
+    #' @param uuid The UUID of the User in question.
+    #' @param bypassFederation Bypass federation behavior and apply the update on the local instance only.
+    users_update = function(user, uuid, bypassFederation = NULL)
+    {
+        endPoint <- stringr::str_interp("users/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(bypassFederation = bypassFederation)
+
+        if(length(user) > 0)
+            body <- jsonlite::toJSON(list(user = user),
+                                     auto_unbox = TRUE)
+        else
+            body <- NULL
+
+        response <- private$REST$http$exec("PUT", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_delete is a method defined in Arvados class. It deletes a User record via HTTP DELETE users/<uuid>.
+    #' @param uuid The UUID of the User in question.
+    users_delete = function(uuid)
+    {
+        endPoint <- stringr::str_interp("users/${uuid}")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("DELETE", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_current is a method defined in Arvados class. It sends an HTTP GET to users/current, returning the user record for the supplied token.
+    users_current = function()
+    {
+        endPoint <- stringr::str_interp("users/current")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_system is a method defined in Arvados class. It sends an HTTP GET to users/system, returning the system user record.
+    users_system = function()
+    {
+        endPoint <- stringr::str_interp("users/system")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_activate is a method defined in Arvados class. It sends an HTTP POST to users/<uuid>/activate.
+    #' @param uuid The UUID of the User in question.
+    users_activate = function(uuid)
+    {
+        endPoint <- stringr::str_interp("users/${uuid}/activate")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_setup is a method defined in Arvados class. It sends an HTTP POST to users/setup.
+    #' @param uuid UUID of an existing user to set up (presumably optional; confirm against the Arvados users API).
+    #' @param user Attributes of a user record to set up (presumably used when uuid is not given).
+    #' @param repo_name Name of a git repository to create for the user — TODO confirm semantics.
+    #' @param vm_uuid UUID of a virtual machine to grant the user login access to — TODO confirm semantics.
+    #' @param send_notification_email Whether to send a notification email ("true"/"false"; default "false").
+    users_setup = function(uuid = NULL, user = NULL,
+        repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+    {
+        endPoint <- stringr::str_interp("users/setup")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(uuid = uuid, user = user,
+                          repo_name = repo_name, vm_uuid = vm_uuid,
+                          send_notification_email = send_notification_email)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_unsetup is a method defined in Arvados class. It sends an HTTP POST to users/<uuid>/unsetup.
+    #' @param uuid The UUID of the User in question.
+    users_unsetup = function(uuid)
+    {
+        endPoint <- stringr::str_interp("users/${uuid}/unsetup")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- NULL
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_merge is a method defined in Arvados class. It sends an HTTP POST to users/merge.
+    #' @param newOwnerUUID UUID of the user or group that will own the merged account's data — TODO confirm against the Arvados users API.
+    #' @param newUserToken Token identifying the account to merge into (presumably; confirm).
+    #' @param redirectToNewUser Whether the old account should redirect to the new one (presumably "true"/"false"; confirm).
+    #' @param oldUserUUID UUID of the account being merged from (presumably; confirm).
+    #' @param newUserUUID UUID of the account being merged into (presumably; confirm).
+    users_merge = function(newOwnerUUID, newUserToken = NULL,
+        redirectToNewUser = NULL, oldUserUUID = NULL,
+        newUserUUID = NULL)
+    {
+        endPoint <- stringr::str_interp("users/merge")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(newOwnerUUID = newOwnerUUID,
+                          newUserToken = newUserToken, redirectToNewUser = redirectToNewUser,
+                          oldUserUUID = oldUserUUID, newUserUUID = newUserUUID)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("POST", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+    #' @description
+    #' users_list is a method defined in Arvados class. It sends an HTTP GET to the users endpoint and returns the parsed listing.
+    #' @param filters Filters to limit which objects are returned.
+    #' @param where Deprecated alternative to filters.
+    #' @param order Attributes to use as sort keys for the returned objects.
+    #' @param select Attributes of each object to return in the response.
+    #' @param distinct Return each distinct object at most once.
+    #' @param limit Maximum number of objects to return (default "100").
+    #' @param offset Number of matching objects to skip before returning results (default "0").
+    #' @param count "exact" (default) to include an item count, "none" to omit it.
+    #' @param clusterID List objects on a remote federated cluster instead of the current one.
+    #' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+    users_list = function(filters = NULL, where = NULL,
+        order = NULL, select = NULL, distinct = NULL,
+        limit = "100", offset = "0", count = "exact",
+        clusterID = NULL, bypassFederation = NULL)
+    {
+        endPoint <- stringr::str_interp("users")
+        url <- paste0(private$host, endPoint)
+        headers <- list(Authorization = paste("Bearer", private$token),
+                        "Content-Type" = "application/json")
+        queryArgs <- list(filters = filters, where = where,
+                          order = order, select = select, distinct = distinct,
+                          limit = limit, offset = offset, count = count,
+                          clusterID = clusterID, bypassFederation = bypassFederation)
+
+        body <- NULL
+
+        response <- private$REST$http$exec("GET", url, headers, body,
+                                           queryArgs, private$numRetries)
+        resource <- private$REST$httpParser$parseJSONResponse(response)
+
+        if(!is.null(resource$errors))
+            stop(resource$errors)
+
+        resource
+    },
+
+		#' @description
+		#' repositories_get is a method defined in Arvados class.
+		#' @param uuid The UUID of the Repository in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_get = function(uuid)
+		{
+			endPoint <- stringr::str_interp("repositories/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' repositories_create is a method defined in Arvados class.
+		#' @param repository Repository object.
+		#' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+		#' @param clusterID Create object on a remote federated cluster instead of the current one.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_create = function(repository,
+			ensureUniqueName = "false", clusterID = NULL)
+		{
+			endPoint <- stringr::str_interp("repositories")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(ensureUniqueName = ensureUniqueName,
+			                  clusterID = clusterID)
+
+			if(length(repository) > 0)
+				body <- jsonlite::toJSON(list(repository = repository),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("POST", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' repositories_update is a method defined in Arvados class.
+		#' @param repository Repository object.
+		#' @param uuid The UUID of the Repository in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_update = function(repository, uuid)
+		{
+			endPoint <- stringr::str_interp("repositories/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			if(length(repository) > 0)
+				body <- jsonlite::toJSON(list(repository = repository),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("PUT", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' repositories_delete is a method defined in Arvados class.
+		#' @param uuid The UUID of the Repository in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_delete = function(uuid)
+		{
+			endPoint <- stringr::str_interp("repositories/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("DELETE", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' repositories_get_all_permissions is a method defined in Arvados class.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_get_all_permissions = function()
+		{
+			endPoint <- stringr::str_interp("repositories/get_all_permissions")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' repositories_list is a method defined in Arvados class.
+		#' @param filters Query filters to apply to the returned list.
+		#' @param where Conditions for selecting which items to return.
+		#' @param order Order in which to return matching items.
+		#' @param select Attributes of each item to include in the response.
+		#' @param distinct Whether to return only distinct items.
+		#' @param limit Maximum number of items to return (default "100").
+		#' @param offset Number of matching items to skip before the first one returned (default "0").
+		#' @param count Count mode, e.g. "exact" (default).
+		#' @param clusterID List objects on a remote federated cluster instead of the current one.
+		#' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		repositories_list = function(filters = NULL,
+			where = NULL, order = NULL, select = NULL,
+			distinct = NULL, limit = "100", offset = "0",
+			count = "exact", clusterID = NULL, bypassFederation = NULL)
+		{
+			endPoint <- stringr::str_interp("repositories")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(filters = filters, where = where,
+			                  order = order, select = select, distinct = distinct,
+			                  limit = limit, offset = offset, count = count,
+			                  clusterID = clusterID, bypassFederation = bypassFederation)
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_get is a method defined in Arvados class.
+		#' @param uuid The UUID of the virtualMachine in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_get = function(uuid)
+		{
+			endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_create is a method defined in Arvados class.
+		#' @param virtualMachine virtualMachine object.
+		#' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+		#' @param clusterID Create object on a remote federated cluster instead of the current one.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_create = function(virtualMachine,
+			ensureUniqueName = "false", clusterID = NULL)
+		{
+			endPoint <- stringr::str_interp("virtual_machines")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(ensureUniqueName = ensureUniqueName,
+			                  clusterID = clusterID)
+
+			if(length(virtualMachine) > 0)
+				body <- jsonlite::toJSON(list(virtualMachine = virtualMachine),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("POST", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_update is a method defined in Arvados class.
+		#' @param virtualMachine virtualMachine object.
+		#' @param uuid The UUID of the virtualMachine in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_update = function(virtualMachine, uuid)
+		{
+			endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			if(length(virtualMachine) > 0)
+				body <- jsonlite::toJSON(list(virtualMachine = virtualMachine),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("PUT", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_delete is a method defined in Arvados class.
+		#' @param uuid The UUID of the virtualMachine in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_delete = function(uuid)
+		{
+			endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("DELETE", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_logins is a method defined in Arvados class.
+		#' @param uuid The UUID of the virtualMachine in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_logins = function(uuid)
+		{
+			endPoint <- stringr::str_interp("virtual_machines/${uuid}/logins")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_get_all_logins is a method defined in Arvados class.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_get_all_logins = function()
+		{
+			endPoint <- stringr::str_interp("virtual_machines/get_all_logins")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' virtual_machines_list is a method defined in Arvados class.
+		#' @param filters Query filters to apply to the returned list.
+		#' @param where Conditions for selecting which items to return.
+		#' @param order Order in which to return matching items.
+		#' @param select Attributes of each item to include in the response.
+		#' @param distinct Whether to return only distinct items.
+		#' @param limit Maximum number of items to return (default "100").
+		#' @param offset Number of matching items to skip before the first one returned (default "0").
+		#' @param count Count mode, e.g. "exact" (default).
+		#' @param clusterID List objects on a remote federated cluster instead of the current one.
+		#' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		virtual_machines_list = function(filters = NULL,
+			where = NULL, order = NULL, select = NULL,
+			distinct = NULL, limit = "100", offset = "0",
+			count = "exact", clusterID = NULL, bypassFederation = NULL)
+		{
+			endPoint <- stringr::str_interp("virtual_machines")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(filters = filters, where = where,
+			                  order = order, select = select, distinct = distinct,
+			                  limit = limit, offset = offset, count = count,
+			                  clusterID = clusterID, bypassFederation = bypassFederation)
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' workflows_get is a method defined in Arvados class.
+		#' @param uuid The UUID of the Workflow in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		workflows_get = function(uuid)
+		{
+			endPoint <- stringr::str_interp("workflows/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' workflows_create is a method defined in Arvados class.
+		#' @param workflow Workflow object.
+		#' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+		#' @param clusterID Create object on a remote federated cluster instead of the current one.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		workflows_create = function(workflow, ensureUniqueName = "false",
+			clusterID = NULL)
+		{
+			endPoint <- stringr::str_interp("workflows")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(ensureUniqueName = ensureUniqueName,
+			                  clusterID = clusterID)
+
+			if(length(workflow) > 0)
+				body <- jsonlite::toJSON(list(workflow = workflow),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("POST", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' workflows_update is a method defined in Arvados class.
+		#' @param workflow Workflow object.
+		#' @param uuid The UUID of the Workflow in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		workflows_update = function(workflow, uuid)
+		{
+			endPoint <- stringr::str_interp("workflows/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			if(length(workflow) > 0)
+				body <- jsonlite::toJSON(list(workflow = workflow),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("PUT", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' workflows_delete is a method defined in Arvados class.
+		#' @param uuid The UUID of the Workflow in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		workflows_delete = function(uuid)
+		{
+			endPoint <- stringr::str_interp("workflows/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("DELETE", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' workflows_list is a method defined in Arvados class.
+		#' @param filters Query filters to apply to the returned list.
+		#' @param where Conditions for selecting which items to return.
+		#' @param order Order in which to return matching items.
+		#' @param select Attributes of each item to include in the response.
+		#' @param distinct Whether to return only distinct items.
+		#' @param limit Maximum number of items to return (default "100").
+		#' @param offset Number of matching items to skip before the first one returned (default "0").
+		#' @param count Count mode, e.g. "exact" (default).
+		#' @param clusterID List objects on a remote federated cluster instead of the current one.
+		#' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		workflows_list = function(filters = NULL,
+			where = NULL, order = NULL, select = NULL,
+			distinct = NULL, limit = "100", offset = "0",
+			count = "exact", clusterID = NULL, bypassFederation = NULL)
+		{
+			endPoint <- stringr::str_interp("workflows")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(filters = filters, where = where,
+			                  order = order, select = select, distinct = distinct,
+			                  limit = limit, offset = offset, count = count,
+			                  clusterID = clusterID, bypassFederation = bypassFederation)
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_get is a method defined in Arvados class.
+		#' @param uuid The UUID of the userAgreement in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_get = function(uuid)
+		{
+			endPoint <- stringr::str_interp("user_agreements/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_create is a method defined in Arvados class.
+		#' @param userAgreement userAgreement object.
+		#' @param ensureUniqueName Adjust name to ensure uniqueness instead of returning an error.
+		#' @param clusterID Create object on a remote federated cluster instead of the current one.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_create = function(userAgreement,
+			ensureUniqueName = "false", clusterID = NULL)
+		{
+			endPoint <- stringr::str_interp("user_agreements")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(ensureUniqueName = ensureUniqueName,
+			                  clusterID = clusterID)
+
+			if(length(userAgreement) > 0)
+				body <- jsonlite::toJSON(list(userAgreement = userAgreement),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("POST", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_update is a method defined in Arvados class.
+		#' @param userAgreement userAgreement object.
+		#' @param uuid The UUID of the userAgreement in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_update = function(userAgreement, uuid)
+		{
+			endPoint <- stringr::str_interp("user_agreements/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			if(length(userAgreement) > 0)
+				body <- jsonlite::toJSON(list(userAgreement = userAgreement),
+				                         auto_unbox = TRUE)
+			else
+				body <- NULL
+
+			response <- private$REST$http$exec("PUT", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_delete is a method defined in Arvados class.
+		#' @param uuid The UUID of the userAgreement in question.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_delete = function(uuid)
+		{
+			endPoint <- stringr::str_interp("user_agreements/${uuid}")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("DELETE", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_signatures is a method defined in Arvados class.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_signatures = function()
+		{
+			endPoint <- stringr::str_interp("user_agreements/signatures")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_sign is a method defined in Arvados class.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_sign = function()
+		{
+			endPoint <- stringr::str_interp("user_agreements/sign")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("POST", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_list is a method defined in Arvados class.
+		#' @param filters Query filters to apply to the returned list.
+		#' @param where Conditions for selecting which items to return.
+		#' @param order Order in which to return matching items.
+		#' @param select Attributes of each item to include in the response.
+		#' @param distinct Whether to return only distinct items.
+		#' @param limit Maximum number of items to return (default "100").
+		#' @param offset Number of matching items to skip before the first one returned (default "0").
+		#' @param count Count mode, e.g. "exact" (default).
+		#' @param clusterID List objects on a remote federated cluster instead of the current one.
+		#' @param bypassFederation Bypass federation behavior, list items from local instance database only.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_list = function(filters = NULL,
+			where = NULL, order = NULL, select = NULL,
+			distinct = NULL, limit = "100", offset = "0",
+			count = "exact", clusterID = NULL, bypassFederation = NULL)
+		{
+			endPoint <- stringr::str_interp("user_agreements")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- list(filters = filters, where = where,
+			                  order = order, select = select, distinct = distinct,
+			                  limit = limit, offset = offset, count = count,
+			                  clusterID = clusterID, bypassFederation = bypassFederation)
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' user_agreements_new is a method defined in Arvados class.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		user_agreements_new = function()
+		{
+			endPoint <- stringr::str_interp("user_agreements/new")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description
+		#' configs_get is a method defined in Arvados class. It fetches the
+		#' exported cluster configuration from the "config" endpoint.
+		#' @return The parsed API response (a list); stops with an error if the response contains errors.
+		configs_get = function()
+		{
+			endPoint <- stringr::str_interp("config")
+			url <- paste0(private$host, endPoint)
+			headers <- list(Authorization = paste("Bearer", private$token),
+			                "Content-Type" = "application/json")
+			# Bug fix: a stray "=" after NULL ("queryArgs <- NULL=") was an
+			# R syntax error that would prevent the whole file from parsing.
+			queryArgs <- NULL
+
+			body <- NULL
+
+			response <- private$REST$http$exec("GET", url, headers, body,
+			                                   queryArgs, private$numRetries)
+			resource <- private$REST$httpParser$parseJSONResponse(response)
+
+			if(!is.null(resource$errors))
+				stop(resource$errors)
+
+			resource
+		},
+
+		#' @description Returns the host name of the Arvados API server.
+		getHostName = function() private$host,
+		#' @description Returns the API authorization token in use.
+		getToken = function() private$token,
+		#' @description Replaces the internal REST service implementation.
+		setRESTService = function(newREST) private$REST <- newREST,
+		#' @description Returns the internal REST service implementation.
+		getRESTService = function() private$REST
+ ),
+
+ private = list(
+
+ token = NULL,
+ host = NULL,
+ REST = NULL,
+ numRetries = NULL
+ ),
+
+ cloneable = FALSE
)
+
+
diff --git a/sdk/R/R/ArvadosFile.R b/sdk/R/R/ArvadosFile.R
index fb1d3b335c..f585d1f946 100644
--- a/sdk/R/R/ArvadosFile.R
+++ b/sdk/R/R/ArvadosFile.R
@@ -2,55 +2,10 @@
#
# SPDX-License-Identifier: Apache-2.0
-#' ArvadosFile
+#' R6 Class Representing a ArvadosFile
#'
+#' @description
#' ArvadosFile class represents a file inside Arvados collection.
-#'
-#' @section Usage:
-#' \preformatted{file = ArvadosFile$new(name)}
-#'
-#' @section Arguments:
-#' \describe{
-#' \item{name}{Name of the file.}
-#' }
-#'
-#' @section Methods:
-#' \describe{
-#' \item{getName()}{Returns name of the file.}
-#' \item{getRelativePath()}{Returns file path relative to the root.}
-#' \item{read(contentType = "raw", offset = 0, length = 0)}{Read file content.}
-#' \item{write(content, contentType = "text/html")}{Write to file (override current content of the file).}
-#' \item{connection(rw)}{Get connection opened in "read" or "write" mode.}
-#' \item{flush()}{Write connections content to a file (override current content of the file).}
-#' \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
-#' \item{getSizeInBytes()}{Returns file size in bytes.}
-#' \item{move(destination)}{Moves file to a new location inside collection.}
-#' \item{copy(destination)}{Copies file to a new location inside collection.}
-#' }
-#'
-#' @name ArvadosFile
-#' @examples
-#' \dontrun{
-#' myFile <- ArvadosFile$new("myFile")
-#'
-#' myFile$write("This is new file content")
-#' fileContent <- myFile$read()
-#' fileContent <- myFile$read("text")
-#' fileContent <- myFile$read("raw", offset = 8, length = 4)
-#'
-#' #Write a table:
-#' arvConnection <- myFile$connection("w")
-#' write.table(mytable, arvConnection)
-#' arvadosFile$flush()
-#'
-#' #Read a table:
-#' arvConnection <- myFile$connection("r")
-#' mytable <- read.table(arvConnection)
-#'
-#' myFile$move("newFolder/myFile")
-#' myFile$copy("newFolder/myFile")
-#' }
-NULL
#' @export
ArvadosFile <- R6::R6Class(
@@ -59,6 +14,14 @@ ArvadosFile <- R6::R6Class(
public = list(
+ #' @description
+    #' Initialize a new ArvadosFile object.
+    #' @param name Name of the file.
+ #' @return A new `ArvadosFile` object.
+ #' @examples
+ #' \dontrun{
+ #' myFile <- ArvadosFile$new("myFile")
+ #' }
initialize = function(name)
{
if(name == "")
@@ -67,13 +30,32 @@ ArvadosFile <- R6::R6Class(
private$name <- name
},
+ #' @description
+ #' Returns name of the file.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile$getName()
+ #' }
getName = function() private$name,
+ #' @description
+    #' Returns the file name (the file listing of a single file).
+    #' @param fullpath Not used for a single file; the file's name is always returned.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile$getFileListing()
+ #' }
getFileListing = function(fullpath = TRUE)
{
self$getName()
},
+ #' @description
+    #' Returns the file size in bytes.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile$getSizeInBytes()
+ #' }
getSizeInBytes = function()
{
if(is.null(private$collection))
@@ -96,13 +78,19 @@ ArvadosFile <- R6::R6Class(
return(NULL)
},
+ #' @description
+    #' Returns the collection that contains this file.
getCollection = function() private$collection,
+ #' @description
+ #' Sets new collection.
setCollection = function(collection, setRecursively = TRUE)
{
private$collection <- collection
},
+ #' @description
+ #' Returns file path relative to the root.
getRelativePath = function()
{
relativePath <- c(private$name)
@@ -118,10 +106,25 @@ ArvadosFile <- R6::R6Class(
paste0(relativePath, collapse = "/")
},
+ #' @description
+    #' Returns the parent of this file (e.g. the enclosing subcollection).
getParent = function() private$parent,
+ #' @description
+    #' Sets a new parent for this file.
setParent = function(newParent) private$parent <- newParent,
+ #' @description
+ #' Read file content.
+    #' @param contentType Type of content; possible values are "text" and "raw".
+    #' @param offset Byte offset from the start of the file at which to begin reading.
+    #' @param length Number of bytes to read.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' arvadosFile <- collection$get(fileName)
+ #' fileContent <- arvadosFile$read("text")
+ #' }
read = function(contentType = "raw", offset = 0, length = 0)
{
if(is.null(private$collection))
@@ -138,6 +141,15 @@ ArvadosFile <- R6::R6Class(
fileContent
},
+ #' @description
+ #' Get connection opened in "read" or "write" mode.
+ #' @param rw Type of connection.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' arvadosFile <- collection$get(fileName)
+ #' arvConnection <- arvadosFile$connection("w")
+ #' }
connection = function(rw)
{
if (rw == "r" || rw == "rb")
@@ -155,6 +167,15 @@ ArvadosFile <- R6::R6Class(
}
},
+ #' @description
+    #' Writes the connection's content to the file, overriding its current content.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' arvadosFile <- collection$get(fileName)
+    #' arvadosFile$write("This is new file content")
+ #' arvadosFile$flush()
+ #' }
flush = function()
{
v <- textConnectionValue(private$buffer)
@@ -162,6 +183,16 @@ ArvadosFile <- R6::R6Class(
self$write(paste(v, collapse='\n'))
},
+ #' @description
+ #' Write to file or override current content of the file.
+    #' @param content Content to write to the file.
+    #' @param contentType MIME type of the content (default "text/html").
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' arvadosFile <- collection$get(fileName)
+    #' arvadosFile$write("This is new file content")
+ #' }
write = function(content, contentType = "text/html")
{
if(is.null(private$collection))
@@ -175,6 +206,13 @@ ArvadosFile <- R6::R6Class(
writeResult
},
+ #' @description
+ #' Moves file to a new location inside collection.
+ #' @param destination Path to new folder.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile$move(newPath)
+ #' }
move = function(destination)
{
if(is.null(private$collection))
@@ -207,6 +245,13 @@ ArvadosFile <- R6::R6Class(
self
},
+ #' @description
+ #' Copies file to a new location inside collection.
+ #' @param destination Path to new folder.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile$copy("NewName.format")
+ #' }
copy = function(destination)
{
if(is.null(private$collection))
@@ -238,6 +283,9 @@ ArvadosFile <- R6::R6Class(
newFile
},
+ #' @description
+ #' Duplicate file and gives it a new name.
+ #' @param newName New name for duplicated file.
duplicate = function(newName = NULL)
{
name <- if(!is.null(newName)) newName else private$name
@@ -261,7 +309,10 @@ ArvadosFile <- R6::R6Class(
# We also need to set content's collection to NULL because
# add method throws exception if we try to add content that already
# belongs to a collection.
+
parentsCollection <- newParent$getCollection()
+ #parent$.__enclos_env__$private$children <- c(parent$.__enclos_env__$private$children, self)
+ #private$parent <- parent
content$setCollection(NULL, setRecursively = FALSE)
newParent$setCollection(NULL, setRecursively = FALSE)
newParent$add(content)
@@ -273,6 +324,9 @@ ArvadosFile <- R6::R6Class(
{
# We temporary set parents collection to NULL. This will ensure that
# remove method doesn't remove this file from REST.
+
+ #private$parent$.__enclos_env__$private$removeChild(private$name)
+ #private$parent <- NULL
parent <- private$parent
parentsCollection <- parent$getCollection()
parent$setCollection(NULL, setRecursively = FALSE)
diff --git a/sdk/R/R/ArvadosR.R b/sdk/R/R/ArvadosR.R
new file mode 100644
index 0000000000..00b068c28a
--- /dev/null
+++ b/sdk/R/R/ArvadosR.R
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+#' @title ArvadosR
+#'
+#' @description
+#'
+#' Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.
+#'
+#' @author \itemize{
+#' \item Lucas Di Pentima
+#' \item Ward Vandewege
+#' \item Fuad Muhic
+#' \item Peter Amstutz
+#' \item Aneta Stanczyk
+#' \item Piotr Nowosielski}
+#'
+#' @seealso \itemize{
+#' \item https://arvados.org
+#' \item https://doc.arvados.org/sdk/R/index.html
+#' \item https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R}
+#'
+#' @name ArvadosR
+NULL
diff --git a/sdk/R/R/Collection.R b/sdk/R/R/Collection.R
index 9ed758c0a4..9ca74accc5 100644
--- a/sdk/R/R/Collection.R
+++ b/sdk/R/R/Collection.R
@@ -2,62 +2,45 @@
#
# SPDX-License-Identifier: Apache-2.0
-#' Collection
+#' R6 Class Representing Arvados Collection
#'
-#' Collection class provides interface for working with Arvados collections.
+#' @description
+#' Collection class provides interface for working with Arvados collections,
+#' for example, actions like creating, updating, moving or removing are possible.
#'
-#' @section Usage:
-#' \preformatted{collection = Collection$new(arv, uuid)}
+#' @seealso
+#' https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R
#'
-#' @section Arguments:
-#' \describe{
-#' \item{arv}{Arvados object.}
-#' \item{uuid}{UUID of a collection.}
-#' }
-#'
-#' @section Methods:
-#' \describe{
-#' \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
-#' \item{create(files)}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
-#' \item{remove(fileNames)}{Remove one or more files from the collection.}
-#' \item{move(content, destination)}{Moves ArvadosFile or Subcollection to another location in the collection.}
-#' \item{copy(content, destination)}{Copies ArvadosFile or Subcollection to another location in the collection.}
-#' \item{getFileListing()}{Returns collections file content as character vector.}
-#' \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
-#' }
-#'
-#' @name Collection
-#' @examples
-#' \dontrun{
-#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-#' collection <- Collection$new(arv, "uuid")
-#'
-#' createdFiles <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
-#'
-#' collection$remove("location/to/my/file.cpp")
-#'
-#' collection$move("folder/file.cpp", "file.cpp")
-#'
-#' arvadosFile <- collection$get("location/to/my/file.cpp")
-#' arvadosSubcollection <- collection$get("location/to/my/directory/")
-#' }
-NULL
-
#' @export
+
Collection <- R6::R6Class(
"Collection",
public = list(
- uuid = NULL,
-
- initialize = function(api, uuid)
+        #' @field uuid The UUID of the Collection.
+ uuid = NULL,
+
+ #' @description
+        #' Initialize a new Collection object.
+        #' @param api Arvados object.
+        #' @param uuid The UUID of the Collection.
+ #' @return A new `Collection` object.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, CollectionUUID)
+ #' }
+ initialize = function(api, uuid)
{
private$REST <- api$getRESTService()
self$uuid <- uuid
},
+ #' @description
+ #' Adds ArvadosFile or Subcollection specified by content to the collection. Used only with ArvadosFile or Subcollection.
+ #' @param content Content to be added.
+ #' @param relativePath Path to add content.
add = function(content, relativePath = "")
{
if(is.null(private$tree))
@@ -98,6 +81,181 @@ Collection <- R6::R6Class(
}
},
+ #' @description
+ #' Read file content.
+ #' @param file Name of the file.
+        #' @param con Connection object (note: appears unused by this method — verify).
+ #' @param sep Separator used in reading tsv, csv file format.
+ #' @param istable Used in reading txt file to check if the file is table or not.
+ #' @param fileclass Used in reading fasta file to set file class.
+ #' @param Ncol Used in reading binary file to set numbers of columns in data.frame.
+ #' @param Nrow Used in reading binary file to set numbers of rows in data.frame size.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
+ #' readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
+ #' readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
+ #' readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
+ #' readFile <- collection$readArvFile(arvadosFile, Ncol= 4, Nrow = 32) # binary, only numbers
+ #' readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text
+ #' }
+ readArvFile = function(file, con, sep = ',', istable = NULL, fileclass = "SeqFastadna", Ncol = NULL, Nrow = NULL, wantedFunction = NULL)
+ {
+ arvFile <- self$get(file)
+ FileName <- arvFile$getName()
+ FileName <- tolower(FileName)
+ FileFormat <- gsub(".*\\.", "", FileName)
+
+            # set environment variables for S3-compatible access to the collection
+ ARVADOS_API_TOKEN <- Sys.getenv("ARVADOS_API_TOKEN")
+ ARVADOS_API_HOST <- Sys.getenv("ARVADOS_API_HOST")
+ my_collection <- self$uuid
+ key <- gsub("/", "_", ARVADOS_API_TOKEN)
+
+ Sys.setenv(
+ "AWS_ACCESS_KEY_ID" = key,
+ "AWS_SECRET_ACCESS_KEY" = key,
+ "AWS_DEFAULT_REGION" = "collections",
+ "AWS_S3_ENDPOINT" = gsub("api[.]", "", ARVADOS_API_HOST))
+
+ if (FileFormat == "txt") {
+ if (is.null(istable)){
+ stop(paste('You need to paste whether it is a text or table file'))
+ } else if (istable == 'no') {
+ fileContent <- arvFile$read("text") # used to read
+ fileContent <- gsub("[\r\n]", " ", fileContent)
+ } else if (istable == 'yes') {
+ arvConnection <- arvFile$connection("r") # used to make possible use different function later
+ fileContent <- read.table(arvConnection)
+ }
+ }
+ else if (FileFormat == "xlsx") {
+ fileContent <- aws.s3::s3read_using(FUN = openxlsx::read.xlsx, object = file, bucket = my_collection)
+ }
+ else if (FileFormat == "csv" || FileFormat == "tsv") {
+ arvConnection <- arvFile$connection("r")
+ if (FileFormat == "tsv"){
+ mytable <- read.table(arvConnection, sep = '\t')
+ } else if (FileFormat == "csv" & sep == '\t') {
+ mytable <- read.table(arvConnection, sep = '\t')
+ } else if (FileFormat == "csv") {
+ mytable <- read.table(arvConnection, sep = ',')
+ } else {
+ stop(paste('File format not supported, use arvadosFile$connection() and customise it'))
+ }
+ }
+ else if (FileFormat == "fasta") {
+ fastafile <- aws.s3::s3read_using(FUN = seqinr::read.fasta, as.string = TRUE, object = file, bucket = my_collection)
+ }
+ else if (FileFormat == "dat" || FileFormat == "bin") {
+ fileContent <- gzcon(arvFile$connection("rb"))
+
+                # helper to process binary data into a data.frame
+ read_bin.file <- function(fileContent) {
+ # read binfile
+ column.names <- readBin(fileContent, character(), n = Ncol)
+ bindata <- readBin(fileContent, numeric(), Nrow*Ncol+Ncol)
+ # check
+ res <- which(bindata < 0.0000001)
+ if (is.list(res)) {
+ bindata <- bindata[-res]
+ } else {
+ bindata <- bindata
+ }
+ # make a dataframe
+ data <- data.frame(matrix(data = NA, nrow = Nrow, ncol = Ncol))
+ for (i in 1:Ncol) {
+ data[,i] <- bindata[(1+Nrow*(i-1)):(Nrow*i)]
+ }
+ colnames(data) = column.names
+
+ len <- which(is.na(data[,Ncol])) # error if sth went wrong
+ if (length(len) == 0) {
+ data
+ } else {
+ stop(paste("there is a factor or text in the table, customize the function by typing more arguments"))
+ }
+ }
+ if (is.null(Nrow) | is.null(Ncol)){
+ stop(paste('You need to specify numbers of columns and rows'))
+ }
+ if (is.null(istable)) {
+ fileContent <- read_bin.file(fileContent) # call a function
+ } else if (istable == "factor") { # if there is a table with col name
+ fileContent <- read_bin.file(fileContent)
+ }
+ }
+ else if (FileFormat == "rds" || FileFormat == "rdata") {
+ arvConnection <- arvFile$connection("rb")
+ mytable <- readRDS(gzcon(arvConnection))
+ }
+ else {
+ stop(parse(('File format not supported, use arvadosFile$connection() and customise it')))
+ }
+ },
+
+ #' @description
+ #' Write file content
+ #' @param name Name of the file.
+ #' @param file File to be saved.
+ #' @param istable Used in writing txt file to check if the file is table or not.
+ #' @examples
+ #' \dontrun{
+ #' collection <- Collection$new(arv, collectionUUID)
+ #' writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv
+ #' writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv
+ #' writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta
+ #' writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table
+ #' writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text
+ #' writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary
+ #' writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx
+ #' }
+ writeFile = function(name, file, collectionUUID, fileFormat, istable = NULL, seqName = NULL)
+ {
+            # set environment variables for S3-compatible access to the collection
+ ARVADOS_API_TOKEN <- Sys.getenv("ARVADOS_API_TOKEN")
+ ARVADOS_API_HOST <- Sys.getenv("ARVADOS_API_HOST")
+ my_collection <- self$uuid
+ key <- gsub("/", "_", ARVADOS_API_TOKEN)
+
+ Sys.setenv(
+ "AWS_ACCESS_KEY_ID" = key,
+ "AWS_SECRET_ACCESS_KEY" = key,
+ "AWS_DEFAULT_REGION" = "collections",
+ "AWS_S3_ENDPOINT" = gsub("api[.]", "", ARVADOS_API_HOST))
+
+ # save file
+ if (fileFormat == "txt") {
+ if (istable == "yes") {
+ aws.s3::s3write_using(file, FUN = write.table, object = name, bucket = collectionUUID)
+ } else if (istable == "no") {
+ aws.s3::s3write_using(file, FUN = writeChar, object = name, bucket = collectionUUID)
+ } else {
+ stop(paste("Specify parametr istable"))
+ }
+ } else if (fileFormat == "csv") {
+ aws.s3::s3write_using(file, FUN = write.csv, object = name, bucket = collectionUUID)
+ } else if (fileFormat == "tsv") {
+ aws.s3::s3write_using(file, FUN = write.table, row.names = FALSE, sep = "\t", object = name, bucket = collectionUUID)
+ } else if (fileFormat == "fasta") {
+ aws.s3::s3write_using(file, FUN = seqinr::write.fasta, name = seqName, object = name, bucket = collectionUUID)
+ } else if (fileFormat == "xlsx") {
+ aws.s3::s3write_using(file, FUN = openxlsx::write.xlsx, object = name, bucket = collectionUUID)
+ } else if (fileFormat == "dat" || fileFormat == "bin") {
+ aws.s3::s3write_using(file, FUN = writeBin, object = name, bucket = collectionUUID)
+ } else {
+ stop(parse(('File format not supported, use arvadosFile$connection() and customise it')))
+ }
+ },
+
+ #' @description
+ #' Creates one or more ArvadosFiles and adds them to the collection at specified path.
+ #' @param files Content to be created.
+ #' @examples
+ #' \dontrun{
+ #' collection <- arv$collections_create(name = collectionTitle, description = collectionDescription, owner_uuid = collectionOwner, properties = list("ROX37196928443768648" = "ROX37742976443830153"))
+ #' }
create = function(files)
{
if(is.null(private$tree))
@@ -116,7 +274,7 @@ Collection <- R6::R6Class(
private$REST$create(file, self$uuid)
newTreeBranch$setCollection(self)
- newTreeBranch
+ newTreeBranch
})
}
else
@@ -127,6 +285,13 @@ Collection <- R6::R6Class(
}
},
+ #' @description
+ #' Remove one or more files from the collection.
+ #' @param paths Content to be removed.
+ #' @examples
+ #' \dontrun{
+ #' collection$remove(fileName.format)
+ #' }
remove = function(paths)
{
if(is.null(private$tree))
@@ -160,6 +325,14 @@ Collection <- R6::R6Class(
}
},
+ #' @description
+ #' Moves ArvadosFile or Subcollection to another location in the collection.
+ #' @param content Content to be moved.
+ #' @param destination Path to move content.
+ #' @examples
+ #' \dontrun{
+ #' collection$move("fileName.format", path)
+ #' }
move = function(content, destination)
{
if(is.null(private$tree))
@@ -175,6 +348,14 @@ Collection <- R6::R6Class(
elementToMove$move(destination)
},
+ #' @description
+ #' Copies ArvadosFile or Subcollection to another location in the collection.
+ #' @param content Content to be moved.
+ #' @param destination Path to move content.
+ #' @examples
+ #' \dontrun{
+ #' copied <- collection$copy("oldName.format", "newName.format")
+ #' }
copy = function(content, destination)
{
if(is.null(private$tree))
@@ -190,6 +371,12 @@ Collection <- R6::R6Class(
elementToCopy$copy(destination)
},
+ #' @description
+ #' Refreshes the environment.
+ #' @examples
+ #' \dontrun{
+ #' collection$refresh()
+ #' }
refresh = function()
{
if(!is.null(private$tree))
@@ -199,6 +386,12 @@ Collection <- R6::R6Class(
}
},
+ #' @description
+ #' Returns collections file content as character vector.
+ #' @examples
+ #' \dontrun{
+ #' list <- collection$getFileListing()
+ #' }
getFileListing = function()
{
if(is.null(private$tree))
@@ -208,6 +401,13 @@ Collection <- R6::R6Class(
content[order(tolower(content))]
},
+ #' @description
+ #' If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.
+ #' @param relativePath Path from content is taken.
+ #' @examples
+ #' \dontrun{
+ #' arvadosFile <- collection$get(fileName)
+ #' }
get = function(relativePath)
{
if(is.null(private$tree))
@@ -219,14 +419,14 @@ Collection <- R6::R6Class(
getRESTService = function() private$REST,
setRESTService = function(newRESTService) private$REST <- newRESTService
),
-
private = list(
REST = NULL,
+        # tree: internal tree structure representing the collection content
tree = NULL,
fileContent = NULL,
- generateCollectionTreeStructure = function()
+ generateCollectionTreeStructure = function(relativePath = NULL)
{
if(is.null(self$uuid))
stop("Collection uuid is not defined.")
@@ -234,7 +434,7 @@ Collection <- R6::R6Class(
if(is.null(private$REST))
stop("REST service is not defined.")
- private$fileContent <- private$REST$getCollectionContent(self$uuid)
+ private$fileContent <- private$REST$getCollectionContent(self$uuid, relativePath)
private$tree <- CollectionTree$new(private$fileContent, self)
}
),
diff --git a/sdk/R/R/HttpRequest.R b/sdk/R/R/HttpRequest.R
index 18b36f9689..4540902d2e 100644
--- a/sdk/R/R/HttpRequest.R
+++ b/sdk/R/R/HttpRequest.R
@@ -30,7 +30,6 @@ HttpRequest <- R6::R6Class(
if(toString(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE"))
config$options = list(ssl_verifypeer = 0L)
- # times = 1 regular call + numberOfRetries
response <- httr::RETRY(verb, url = url, body = body,
config = config, times = retryTimes + 1)
},
diff --git a/sdk/R/R/RESTService.R b/sdk/R/R/RESTService.R
index 9c65e72861..5cbcb65f75 100644
--- a/sdk/R/R/RESTService.R
+++ b/sdk/R/R/RESTService.R
@@ -110,10 +110,11 @@ RESTService <- R6::R6Class(
serverResponse
},
- getCollectionContent = function(uuid)
+ getCollectionContent = function(uuid, relativePath = NULL)
+
{
collectionURL <- URLencode(paste0(self$getWebDavHostName(),
- "c=", uuid))
+ "c=", uuid, "/", relativePath))
headers <- list("Authorization" = paste("Bearer", self$token))
@@ -227,8 +228,12 @@ RESTService <- R6::R6Class(
serverResponse <- self$http$exec("PUT", fileURL, headers, body,
retryTimes = self$numRetries)
- if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
- stop(paste("Server code:", serverResponse$status_code))
+            if (serverResponse$status_code < 200){ # raise an error for these status codes
+ stop(paste("Server code:", serverResponse$status_code))}
+ else if (serverResponse$status_code >= 300 & serverResponse$status_code < 422) {
+ stop(paste("Server code:", serverResponse$status_code))}
+ else if (serverResponse$status_code == 422 ) {
+ stop(paste("Project of that name already exists. If you want to change it use project_update() instead"))}
paste("File created:", relativePath)
}
diff --git a/sdk/R/R/Subcollection.R b/sdk/R/R/Subcollection.R
index 981bd687a2..752a396655 100644
--- a/sdk/R/R/Subcollection.R
+++ b/sdk/R/R/Subcollection.R
@@ -2,46 +2,11 @@
#
# SPDX-License-Identifier: Apache-2.0
-#' Subcollection
+#' R6 Class Representing a Subcollection
#'
+#' @description
#' Subcollection class represents a folder inside Arvados collection.
#' It is essentially a composite of arvadosFiles and other subcollections.
-#'
-#' @section Usage:
-#' \preformatted{subcollection = Subcollection$new(name)}
-#'
-#' @section Arguments:
-#' \describe{
-#' \item{name}{Name of the subcollection.}
-#' }
-#'
-#' @section Methods:
-#' \describe{
-#' \item{getName()}{Returns name of the subcollection.}
-#' \item{getRelativePath()}{Returns subcollection path relative to the root.}
-#' \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the subcollection.}
-#' \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
-#' \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
-#' \item{getFileListing()}{Returns subcollections file content as character vector.}
-#' \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
-#' \item{move(destination)}{Moves subcollection to a new location inside collection.}
-#' \item{copy(destination)}{Copies subcollection to a new location inside collection.}
-#' }
-#'
-#' @name Subcollection
-#' @examples
-#' \dontrun{
-#' myFolder <- Subcollection$new("myFolder")
-#' myFile <- ArvadosFile$new("myFile")
-#'
-#' myFolder$add(myFile)
-#' myFolder$get("myFile")
-#' myFolder$remove("myFile")
-#'
-#' myFolder$move("newLocation/myFolder")
-#' myFolder$copy("newLocation/myFolder")
-#' }
-NULL
#' @export
Subcollection <- R6::R6Class(
@@ -50,13 +15,21 @@ Subcollection <- R6::R6Class(
public = list(
+ #' @description
+        #' Initialize a new Subcollection object.
+        #' @param name Name of the new Subcollection.
+ #' @return A new `Subcollection` object.
initialize = function(name)
{
private$name <- name
},
+ #' @description
+ #' Returns name of the file.
getName = function() private$name,
+ #' @description
+ #' Returns Subcollection's path relative to the root.
getRelativePath = function()
{
relativePath <- c(private$name)
@@ -72,6 +45,9 @@ Subcollection <- R6::R6Class(
paste0(relativePath, collapse = "/")
},
+ #' @description
+ #' Adds ArvadosFile or Subcollection specified by content to the Subcollection.
+ #' @param content Content to be added.
add = function(content)
{
if("ArvadosFile" %in% class(content) ||
@@ -115,6 +91,9 @@ Subcollection <- R6::R6Class(
}
},
+ #' @description
+ #' Removes ArvadosFile or Subcollection specified by name from the Subcollection.
+ #' @param name Name of the file to be removed.
remove = function(name)
{
if(is.character(name))
@@ -146,12 +125,17 @@ Subcollection <- R6::R6Class(
}
},
+ #' @description
+ #' Returns Subcollections file content as character vector.
+        #' @param fullPath If TRUE, returned paths are relative to the collection root — TODO confirm.
getFileListing = function(fullPath = TRUE)
{
content <- private$getContentAsCharVector(fullPath)
content[order(tolower(content))]
},
+ #' @description
+ #' Returns subcollections content size in bytes.
getSizeInBytes = function()
{
if(is.null(private$collection))
@@ -164,6 +148,9 @@ Subcollection <- R6::R6Class(
return(sum(fileSizes))
},
+ #' @description
+ #' Moves Subcollection to a new location inside collection.
+ #' @param destination Path to move the file.
move = function(destination)
{
if(is.null(private$collection))
@@ -196,6 +183,9 @@ Subcollection <- R6::R6Class(
self
},
+ #' @description
+ #' Copies Subcollection to a new location inside collection.
+ #' @param destination Path to copy the file.
copy = function(destination)
{
if(is.null(private$collection))
@@ -227,6 +217,9 @@ Subcollection <- R6::R6Class(
newContent
},
+ #' @description
+        #' Duplicates the Subcollection and gives it a new name.
+        #' @param newName New name for the duplicated Subcollection.
duplicate = function(newName = NULL)
{
name <- if(!is.null(newName)) newName else private$name
@@ -237,6 +230,9 @@ Subcollection <- R6::R6Class(
root
},
+ #' @description
+        #' If name is valid, returns ArvadosFile or Subcollection specified by name, else returns NULL.
+ #' @param name Name of the file.
get = function(name)
{
for(child in private$children)
@@ -248,14 +244,18 @@ Subcollection <- R6::R6Class(
return(NULL)
},
+ #' @description
+        #' Returns the first child of the Subcollection, or NULL if it has no children.
getFirst = function()
{
if(length(private$children) == 0)
- return(NULL)
+ return(NULL)
private$children[[1]]
},
+ #' @description
+        #' Sets the Collection this Subcollection belongs to (recursively for children by default).
setCollection = function(collection, setRecursively = TRUE)
{
private$collection = collection
@@ -267,10 +267,16 @@ Subcollection <- R6::R6Class(
}
},
+ #' @description
+ #' Returns Collection of Subcollection.
getCollection = function() private$collection,
+ #' @description
+        #' Returns the parent of this Subcollection.
getParent = function() private$parent,
+ #' @description
+        #' Sets a new parent for this Subcollection.
setParent = function(newParent) private$parent <- newParent
),
diff --git a/sdk/R/R/autoGenAPI.R b/sdk/R/R/autoGenAPI.R
index c86684f8b0..fbf58c2f51 100644
--- a/sdk/R/R/autoGenAPI.R
+++ b/sdk/R/R/autoGenAPI.R
@@ -9,6 +9,10 @@ getAPIDocument <- function(){
httr::content(serverResponse, as = "parsed", type = "application/json")
}
+#' generateAPI
+#'
+#' Autogenerate classes to interact with Arvados from the Arvados discovery document.
+#'
#' @export
generateAPI <- function()
{
diff --git a/sdk/R/README.Rmd b/sdk/R/README.Rmd
deleted file mode 100644
index 8cc89d9020..0000000000
--- a/sdk/R/README.Rmd
+++ /dev/null
@@ -1,339 +0,0 @@
-[comment]: # (Copyright (c) The Arvados Authors. All rights reserved.)
-[comment]: # ()
-[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
-
-## R SDK for Arvados
-
-This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections.
-The API is not final and feedback is solicited from users on ways in which it could be improved.
-
-### Installation
-
-```{r include=FALSE}
-knitr::opts_chunk$set(eval=FALSE)
-```
-
-```{r}
-install.packages("ArvadosR", repos=c("https://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)
-```
-
-Note: on Linux, you may have to install supporting packages.
-
-On Centos 7, this is:
-
-```{bash}
-yum install libxml2-devel openssl-devel curl-devel
-```
-
-On Debian, this is:
-
-```{bash}
-apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
-```
-
-Minimum R version required to run ArvadosR is 3.3.0.
-
-
-### Usage
-
-#### Initializing API
-
-* Load Library and Initialize API:
-
-```{r}
-library('ArvadosR')
-# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
-arv <- Arvados$new()
-
-# provide them explicitly
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-```
-
-Optionally, add numRetries parameter to specify number of times to retry failed service requests.
-Default is 0.
-
-```{r}
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
-```
-
-This parameter can be set at any time using setNumRetries
-
-```{r}
-arv$setNumRetries(5)
-```
-
-
-#### Working with collections
-
-* Get a collection:
-
-```{r}
-collection <- arv$collections.get("uuid")
-```
-
-Be aware that the result from `collections.get` is _not_ a
-`Collection` class. The object returned from this method lets you
-access collection fields like "name" and "description". The
-`Collection` class lets you access the files in the collection for
-reading and writing, and is described in the next section.
-
-* List collections:
-
-```{r}
-# offset of 0 and default limit of 100
-collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
-
-collectionList <- arv$collections.list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
-
-# count of total number of items (may be more than returned due to paging)
-collectionList$items_available
-
-# items which match the filter criteria
-collectionList$items
-```
-
-* List all collections even if the number of items is greater than maximum API limit:
-
-```{r}
-collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
-```
-
-* Delete a collection:
-
-```{r}
-deletedCollection <- arv$collections.delete("uuid")
-```
-
-* Update a collection's metadata:
-
-```{r}
-updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"), "uuid")
-```
-
-* Create a new collection:
-
-```{r}
-newCollection <- arv$collections.create(list(name = "Example", description = "This is a test collection"))
-```
-
-
-#### Manipulating collection content
-
-* Initialize a collection object:
-
-```{r}
-collection <- Collection$new(arv, "uuid")
-```
-
-* Get list of files:
-
-```{r}
-files <- collection$getFileListing()
-```
-
-* Get ArvadosFile or Subcollection from internal tree-like structure:
-
-```{r}
-arvadosFile <- collection$get("location/to/my/file.cpp")
-```
-
-or
-
-```{r}
-arvadosSubcollection <- collection$get("location/to/my/directory/")
-```
-
-* Read a table:
-
-```{r}
-arvadosFile <- collection$get("myinput.txt")
-arvConnection <- arvadosFile$connection("r")
-mytable <- read.table(arvConnection)
-```
-
-* Write a table:
-
-```{r}
-arvadosFile <- collection$create("myoutput.txt")[[1]]
-arvConnection <- arvadosFile$connection("w")
-write.table(mytable, arvConnection)
-arvadosFile$flush()
-```
-
-* Write to existing file (overwrites current content of the file):
-
-```{r}
-arvadosFile <- collection$get("location/to/my/file.cpp")
-arvadosFile$write("This is new file content")
-```
-
-* Read whole file or just a portion of it:
-
-```{r}
-fileContent <- arvadosFile$read()
-fileContent <- arvadosFile$read("text")
-fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
-```
-
-* Get ArvadosFile or Subcollection size:
-
-```{r}
-size <- arvadosFile$getSizeInBytes()
-```
-
-or
-
-```{r}
-size <- arvadosSubcollection$getSizeInBytes()
-```
-
-* Create new file in a collection (returns a vector of one or more ArvadosFile objects):
-
-```{r}
-collection$create(files)
-```
-
-Example:
-
-```{r}
-mainFile <- collection$create("cpp/src/main.cpp")[[1]]
-fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
-```
-
-* Delete file from a collection:
-
-```{r}
-collection$remove("location/to/my/file.cpp")
-```
-
-You can remove both Subcollection and ArvadosFile.
-If subcollection contains more files or folders they will be removed recursively.
-
-You can also remove multiple files at once:
-
-```{r}
-collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
-```
-
-* Delete file or folder from a Subcollection:
-
-```{r}
-subcollection <- collection$get("mySubcollection/")
-subcollection$remove("fileInsideSubcollection.exe")
-subcollection$remove("folderInsideSubcollection/")
-```
-
-* Move or rename a file or folder within a collection (moving between collections is currently not supported):
-
-Directly from collection
-
-```{r}
-collection$move("folder/file.cpp", "file.cpp")
-```
-
-Or from file
-
-```{r}
-file <- collection$get("location/to/my/file.cpp")
-file$move("newDestination/file.cpp")
-```
-
-Or from subcollection
-
-```{r}
-subcollection <- collection$get("location/to/folder")
-subcollection$move("newDestination/folder")
-```
-
-Make sure to include new file name in destination.
-In second example file$move("newDestination/") will not work.
-
-* Copy file or folder within a collection (copying between collections is currently not supported):
-
-Directly from collection
-
-```{r}
-collection$copy("folder/file.cpp", "file.cpp")
-```
-
-Or from file
-
-```{r}
-file <- collection$get("location/to/my/file.cpp")
-file$copy("destination/file.cpp")
-```
-
-Or from subcollection
-
-```{r}
-subcollection <- collection$get("location/to/folder")
-subcollection$copy("destination/folder")
-```
-
-#### Working with Aravdos projects
-
-* Get a project:
-
-```{r}
-project <- arv$projects.get("uuid")
-```
-
-* List projects:
-
-```{r}
-list subprojects of a project
-projects <- arv$projects.list(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
-
-list projects which have names beginning with Example
-examples <- arv$projects.list(list(list("name","like","Example%")))
-```
-
-* List all projects even if the number of items is greater than maximum API limit:
-
-```{r}
-projects <- listAll(arv$projects.list, list(list("name","like","Example%")))
-```
-
-* Delete a project:
-
-```{r}
-deletedProject <- arv$projects.delete("uuid")
-```
-
-* Update project:
-
-```{r}
-updatedProject <- arv$projects.update(list(name = "new_name", description = "new description"), "uuid")
-```
-
-* Create project:
-
-```{r}
-newProject <- arv$projects.update(list(name = "project_name", description = "project description"))
-```
-
-#### Help
-
-* View help page of Arvados classes by puting ? before class name:
-
-```{r}
-?Arvados
-?Collection
-?Subcollection
-?ArvadosFile
-```
-
-* View help page of any method defined in Arvados class by puting ? before method name:
-
-```{r}
-?collections.update
-?jobs.get
-```
-
-### Building the ArvadosR package
-
-```{bash}
-cd arvados/sdk && R CMD build R
-```
-
-This will create a tarball of the ArvadosR package in the current directory.
diff --git a/sdk/R/README.md b/sdk/R/README.md
new file mode 100644
index 0000000000..fe98e648ca
--- /dev/null
+++ b/sdk/R/README.md
@@ -0,0 +1,379 @@
+[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
+
+# R SDK for Arvados
+
+This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections. The API is not final and feedback is solicited from users on ways in which it could be improved.
+
+## Key Topics
+* Installation
+* Usage
+ * Initializing API
+ * Working with collections
+ * Manipulating collection content
+ * Working with Arvados projects
+ * Help
+* Building the ArvadosR package
+
+## Installation
+
+Minimum R version required to run ArvadosR is 3.3.0.
+
+```r
+install.packages("ArvadosR", repos=c("https://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)
+library('ArvadosR')
+```
+
+> **Note**
+> On Linux, you may have to install supporting packages.
+>
+> On Centos 7, this is:
+> ```
+> yum install libxml2-devel openssl-devel curl-devel
+> ```
+>
+> On Debian, this is:
+> ```
+> apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
+> ```
+
+
+## Usage
+
+### Initializing API
+
+```r
+# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
+arv <- Arvados$new()
+
+# provide them explicitly
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+```
+
+Optionally, add `numRetries` parameter to specify number of times to retry failed service requests. Default is 0.
+
+```r
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
+```
+
+This parameter can be set at any time using `setNumRetries`
+
+```r
+arv$setNumRetries(5)
+```
+
+### Working with Arvados projects
+
+##### Create project:
+
+```r
+newProject <- arv$project_create(name = "project name", description = "project description", owner_uuid = "project UUID", properties = NULL, ensureUniqueName = "false")
+```
+
+##### Update project:
+
+```r
+updatedProject <- arv$project_update(name = "new project name", properties = newProperties, uuid = "projectUUID")
+```
+
+##### Delete a project:
+
+```r
+deletedProject <- arv$project_delete("uuid")
+```
+
+#### Find a project:
+
+##### Get a project:
+
+```r
+project <- arv$project_get("uuid")
+```
+
+##### List projects:
+
+```r
+# list subprojects of a project
+projects <- arv$project_list(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
+
+# list projects which have names beginning with Example
+examples <- arv$project_list(list(list("name","like","Example%")))
+```
+
+##### List all projects even if the number of items is greater than maximum API limit:
+
+```r
+projects <- listAll(arv$project_list, list(list("name","like","Example%")))
+```
+
+### Working with collections
+
+#### Create a new collection:
+
+```r
+newCollection <- arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
+```
+
+#### Update a collection's metadata:
+
+```r
+collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
+```
+
+#### Delete a collection:
+
+```r
+deletedCollection <- arv$collections_delete("uuid")
+```
+
+#### Find a collection:
+
+#### Get a collection:
+
+```r
+collection <- arv$collections_get("uuid")
+```
+
+Be aware that the result from `collections_get` is not a Collection class. The object returned from this method lets you access collection fields like "name" and "description". The Collection class lets you access the files in the collection for reading and writing, and is described in the next section.
+
+#### List collections:
+
+```r
+# offset of 0 and default limit of 100
+collectionList <- arv$collections_list(list(list("name", "like", "Test%")))
+
+collectionList <- arv$collections_list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
+
+# count of total number of items (may be more than returned due to paging)
+collectionList$items_available
+
+# items which match the filter criteria
+collectionList$items
+```
+
+#### List all collections even if the number of items is greater than maximum API limit:
+
+```r
+collectionList <- listAll(arv$collections_list, list(list("name", "like", "Test%")))
+```
+
+### Manipulating collection content
+
+#### Initialize a collection object:
+
+```r
+collection <- Collection$new(arv, "uuid")
+```
+
+#### Get list of files:
+
+```r
+files <- collection$getFileListing()
+```
+
+#### Get ArvadosFile or Subcollection from internal tree-like structure:
+
+```r
+arvadosFile <- collection$get("location/to/my/file.cpp")
+# or
+arvadosSubcollection <- collection$get("location/to/my/directory/")
+```
+
+#### Read a table:
+
+```r
+arvadosFile <- collection$get("myinput.txt")
+arvConnection <- arvadosFile$connection("r")
+mytable <- read.table(arvConnection)
+```
+
+#### Write a table:
+
+```r
+arvadosFile <- collection$create("myoutput.txt")[[1]]
+arvConnection <- arvadosFile$connection("w")
+write.table(mytable, arvConnection)
+arvadosFile$flush()
+```
+
+#### Read a table from a tab delimited file:
+
+```r
+arvadosFile <- collection$get("myinput.txt")
+arvConnection <- arvadosFile$connection("r")
+mytable <- read.delim(arvConnection)
+```
+
+#### Read a gzip compressed R object:
+
+```r
+obj <- readRDS(gzcon(coll$get("abc.RDS")$connection("rb")))
+```
+
+#### Write to existing file (overwrites current content of the file):
+
+```r
+arvadosFile <- collection$get("location/to/my/file.cpp")
+arvadosFile$write("This is new file content")
+```
+
+#### Read whole file or just a portion of it:
+
+```r
+fileContent <- arvadosFile$read()
+fileContent <- arvadosFile$read("text")
+fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
+```
+
+#### Read various file types:
+
+Chooses file type based on file name extension. Recognized file extensions: 'txt', 'xlsx', 'csv', 'tsv', 'fasta', 'dat', 'bin', 'rds', 'rdata'.
+
+```r
+collection <- Collection$new(arv, collectionUUID)
+readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
+readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
+readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
+readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
+readFile <- collection$readArvFile(arvadosFile, Ncol= 4, Nrow = 32) # binary data.frame, only numbers
+readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary data.frame with factor or text
+```
+
+#### Get ArvadosFile or Subcollection size:
+
+```r
+size <- arvadosFile$getSizeInBytes()
+# or
+size <- arvadosSubcollection$getSizeInBytes()
+```
+
+#### Create new file in a collection (returns a vector of one or more ArvadosFile objects):
+
+```r
+collection$create(files)
+```
+
+**Example**
+
+```
+mainFile <- collection$create("cpp/src/main.cpp")[[1]]
+fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
+```
+
+#### Delete file from a collection:
+
+```r
+collection$remove("location/to/my/file.cpp")
+```
+
+You can remove both Subcollection and ArvadosFile. If subcollection contains more files or folders they will be removed recursively.
+
+> **Note**
+> You can also remove multiple files at once:
+> ```
+> collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
+> ```
+
+#### Delete file or folder from a Subcollection:
+
+```r
+subcollection <- collection$get("mySubcollection/")
+subcollection$remove("fileInsideSubcollection.exe")
+subcollection$remove("folderInsideSubcollection/")
+```
+
+#### Move or rename a file or folder within a collection (moving between collections is currently not supported):
+
+##### Directly from collection
+
+```r
+collection$move("folder/file.cpp", "file.cpp")
+```
+
+##### Or from file
+
+```r
+file <- collection$get("location/to/my/file.cpp")
+file$move("newDestination/file.cpp")
+```
+
+##### Or from subcollection
+
+```r
+subcollection <- collection$get("location/to/folder")
+subcollection$move("newDestination/folder")
+```
+
+> **Note**
+> Make sure to include the new file name in the destination. In the second example `file$move("newDestination/")` will not work.
+
+#### Copy file or folder within a collection (copying between collections is currently not supported):
+
+##### Directly from collection
+
+```r
+collection$copy("folder/file.cpp", "file.cpp")
+```
+
+##### Or from file
+
+```r
+file <- collection$get("location/to/my/file.cpp")
+file$copy("destination/file.cpp")
+```
+
+##### Or from subcollection
+
+```r
+subcollection <- collection$get("location/to/folder")
+subcollection$copy("destination/folder")
+```
+
+
+### Help
+
+#### View help page of Arvados classes by putting `?` before class name:
+
+```r
+?Arvados
+?Collection
+?Subcollection
+?ArvadosFile
+```
+
+#### View help page of any method defined in Arvados class by putting `?` before method name:
+
+```r
+?collections_update
+?jobs_get
+```
+
+
+
+## Building the ArvadosR package
+
+```sh
+cd arvados/sdk && R CMD build R
+```
+
+This will create a tarball of the ArvadosR package in the current directory.
+
+
+
+## Documentation
+
+Complete documentation, including the [User Guide](https://doc.arvados.org/user/index.html), [Installation documentation](https://doc.arvados.org/install/index.html), [Administrator documentation](https://doc.arvados.org/admin/index.html) and
+[API documentation](https://doc.arvados.org/api/index.html) is available at http://doc.arvados.org/
+
+## Community
+
+Visit [Arvados Community and Getting Help](https://doc.arvados.org/user/getting_started/community.html).
+
+## Reporting bugs
+
+[Report a bug](https://dev.arvados.org/projects/arvados/issues/new) on [dev.arvados.org](https://dev.arvados.org).
+
+## Licensing
+
+Arvados is Free Software. See [Arvados Free Software Licenses](https://doc.arvados.org/user/copying/copying.html) for information about the open source licenses used in Arvados.
diff --git a/sdk/R/install_deps.R b/sdk/R/install_deps.R
index 6c33f97913..4e6c5c88f4 100644
--- a/sdk/R/install_deps.R
+++ b/sdk/R/install_deps.R
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0
-options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
+options(repos=structure(c(CRAN="https://cloud.r-project.org/")))
if (!requireNamespace("devtools")) {
install.packages("devtools")
}
@@ -16,10 +16,7 @@ if (!requireNamespace("markdown")) {
install.packages("markdown")
}
if (!requireNamespace("XML")) {
- # XML 3.99-0.4 depends on R >= 4.0.0, but we run tests on debian
- # stable (10) with R 3.5.2 so we install an older version from
- # source.
- install.packages("https://cran.r-project.org/src/contrib/Archive/XML/XML_3.99-0.3.tar.gz", repos=NULL, type="source")
+ install.packages("XML")
}
devtools::install_dev_deps()
diff --git a/sdk/R/man/Arvados.Rd b/sdk/R/man/Arvados.Rd
index b55dd00dc5..924bfeae9b 100644
--- a/sdk/R/man/Arvados.Rd
+++ b/sdk/R/man/Arvados.Rd
@@ -1,206 +1,3026 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{Arvados}
-\alias{Arvados}
-\title{Arvados}
-\description{
-Arvados class gives users ability to access Arvados REST API.
-}
-\section{Usage}{
-
-\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
-}
-
-\section{Arguments}{
-
-\describe{
- \item{authToken}{Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.}
- \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
- \item{numRetries}{Number which specifies how many times to retry failed service requests.}
-}
-}
-
-\section{Methods}{
-
-\describe{
- \item{}{\code{\link{api_client_authorizations.create}}}
- \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
- \item{}{\code{\link{api_client_authorizations.current}}}
- \item{}{\code{\link{api_client_authorizations.delete}}}
- \item{}{\code{\link{api_client_authorizations.get}}}
- \item{}{\code{\link{api_client_authorizations.list}}}
- \item{}{\code{\link{api_client_authorizations.update}}}
- \item{}{\code{\link{api_clients.create}}}
- \item{}{\code{\link{api_clients.delete}}}
- \item{}{\code{\link{api_clients.get}}}
- \item{}{\code{\link{api_clients.list}}}
- \item{}{\code{\link{api_clients.update}}}
- \item{}{\code{\link{authorized_keys.create}}}
- \item{}{\code{\link{authorized_keys.delete}}}
- \item{}{\code{\link{authorized_keys.get}}}
- \item{}{\code{\link{authorized_keys.list}}}
- \item{}{\code{\link{authorized_keys.update}}}
- \item{}{\code{\link{collections.create}}}
- \item{}{\code{\link{collections.delete}}}
- \item{}{\code{\link{collections.get}}}
- \item{}{\code{\link{collections.list}}}
- \item{}{\code{\link{collections.provenance}}}
- \item{}{\code{\link{collections.trash}}}
- \item{}{\code{\link{collections.untrash}}}
- \item{}{\code{\link{collections.update}}}
- \item{}{\code{\link{collections.used_by}}}
- \item{}{\code{\link{container_requests.create}}}
- \item{}{\code{\link{container_requests.delete}}}
- \item{}{\code{\link{container_requests.get}}}
- \item{}{\code{\link{container_requests.list}}}
- \item{}{\code{\link{container_requests.update}}}
- \item{}{\code{\link{containers.auth}}}
- \item{}{\code{\link{containers.create}}}
- \item{}{\code{\link{containers.current}}}
- \item{}{\code{\link{containers.delete}}}
- \item{}{\code{\link{containers.get}}}
- \item{}{\code{\link{containers.list}}}
- \item{}{\code{\link{containers.lock}}}
- \item{}{\code{\link{containers.secret_mounts}}}
- \item{}{\code{\link{containers.unlock}}}
- \item{}{\code{\link{containers.update}}}
- \item{}{\code{\link{groups.contents}}}
- \item{}{\code{\link{groups.create}}}
- \item{}{\code{\link{groups.delete}}}
- \item{}{\code{\link{groups.get}}}
- \item{}{\code{\link{groups.list}}}
- \item{}{\code{\link{groups.trash}}}
- \item{}{\code{\link{groups.untrash}}}
- \item{}{\code{\link{groups.update}}}
- \item{}{\code{\link{humans.create}}}
- \item{}{\code{\link{humans.delete}}}
- \item{}{\code{\link{humans.get}}}
- \item{}{\code{\link{humans.list}}}
- \item{}{\code{\link{humans.update}}}
- \item{}{\code{\link{jobs.cancel}}}
- \item{}{\code{\link{jobs.create}}}
- \item{}{\code{\link{jobs.delete}}}
- \item{}{\code{\link{jobs.get}}}
- \item{}{\code{\link{jobs.list}}}
- \item{}{\code{\link{jobs.lock}}}
- \item{}{\code{\link{jobs.queue}}}
- \item{}{\code{\link{jobs.queue_size}}}
- \item{}{\code{\link{jobs.update}}}
- \item{}{\code{\link{job_tasks.create}}}
- \item{}{\code{\link{job_tasks.delete}}}
- \item{}{\code{\link{job_tasks.get}}}
- \item{}{\code{\link{job_tasks.list}}}
- \item{}{\code{\link{job_tasks.update}}}
- \item{}{\code{\link{keep_disks.create}}}
- \item{}{\code{\link{keep_disks.delete}}}
- \item{}{\code{\link{keep_disks.get}}}
- \item{}{\code{\link{keep_disks.list}}}
- \item{}{\code{\link{keep_disks.ping}}}
- \item{}{\code{\link{keep_disks.update}}}
- \item{}{\code{\link{keep_services.accessible}}}
- \item{}{\code{\link{keep_services.create}}}
- \item{}{\code{\link{keep_services.delete}}}
- \item{}{\code{\link{keep_services.get}}}
- \item{}{\code{\link{keep_services.list}}}
- \item{}{\code{\link{keep_services.update}}}
- \item{}{\code{\link{links.create}}}
- \item{}{\code{\link{links.delete}}}
- \item{}{\code{\link{links.get}}}
- \item{}{\code{\link{links.get_permissions}}}
- \item{}{\code{\link{links.list}}}
- \item{}{\code{\link{links.update}}}
- \item{}{\code{\link{logs.create}}}
- \item{}{\code{\link{logs.delete}}}
- \item{}{\code{\link{logs.get}}}
- \item{}{\code{\link{logs.list}}}
- \item{}{\code{\link{logs.update}}}
- \item{}{\code{\link{nodes.create}}}
- \item{}{\code{\link{nodes.delete}}}
- \item{}{\code{\link{nodes.get}}}
- \item{}{\code{\link{nodes.list}}}
- \item{}{\code{\link{nodes.ping}}}
- \item{}{\code{\link{nodes.update}}}
- \item{}{\code{\link{pipeline_instances.cancel}}}
- \item{}{\code{\link{pipeline_instances.create}}}
- \item{}{\code{\link{pipeline_instances.delete}}}
- \item{}{\code{\link{pipeline_instances.get}}}
- \item{}{\code{\link{pipeline_instances.list}}}
- \item{}{\code{\link{pipeline_instances.update}}}
- \item{}{\code{\link{pipeline_templates.create}}}
- \item{}{\code{\link{pipeline_templates.delete}}}
- \item{}{\code{\link{pipeline_templates.get}}}
- \item{}{\code{\link{pipeline_templates.list}}}
- \item{}{\code{\link{pipeline_templates.update}}}
- \item{}{\code{\link{projects.create}}}
- \item{}{\code{\link{projects.delete}}}
- \item{}{\code{\link{projects.get}}}
- \item{}{\code{\link{projects.list}}}
- \item{}{\code{\link{projects.update}}}
- \item{}{\code{\link{repositories.create}}}
- \item{}{\code{\link{repositories.delete}}}
- \item{}{\code{\link{repositories.get}}}
- \item{}{\code{\link{repositories.get_all_permissions}}}
- \item{}{\code{\link{repositories.list}}}
- \item{}{\code{\link{repositories.update}}}
- \item{}{\code{\link{specimens.create}}}
- \item{}{\code{\link{specimens.delete}}}
- \item{}{\code{\link{specimens.get}}}
- \item{}{\code{\link{specimens.list}}}
- \item{}{\code{\link{specimens.update}}}
- \item{}{\code{\link{traits.create}}}
- \item{}{\code{\link{traits.delete}}}
- \item{}{\code{\link{traits.get}}}
- \item{}{\code{\link{traits.list}}}
- \item{}{\code{\link{traits.update}}}
- \item{}{\code{\link{user_agreements.create}}}
- \item{}{\code{\link{user_agreements.delete}}}
- \item{}{\code{\link{user_agreements.get}}}
- \item{}{\code{\link{user_agreements.list}}}
- \item{}{\code{\link{user_agreements.new}}}
- \item{}{\code{\link{user_agreements.sign}}}
- \item{}{\code{\link{user_agreements.signatures}}}
- \item{}{\code{\link{user_agreements.update}}}
- \item{}{\code{\link{users.activate}}}
- \item{}{\code{\link{users.create}}}
- \item{}{\code{\link{users.current}}}
- \item{}{\code{\link{users.delete}}}
- \item{}{\code{\link{users.get}}}
- \item{}{\code{\link{users.list}}}
- \item{}{\code{\link{users.merge}}}
- \item{}{\code{\link{users.setup}}}
- \item{}{\code{\link{users.system}}}
- \item{}{\code{\link{users.unsetup}}}
- \item{}{\code{\link{users.update}}}
- \item{}{\code{\link{virtual_machines.create}}}
- \item{}{\code{\link{virtual_machines.delete}}}
- \item{}{\code{\link{virtual_machines.get}}}
- \item{}{\code{\link{virtual_machines.get_all_logins}}}
- \item{}{\code{\link{virtual_machines.list}}}
- \item{}{\code{\link{virtual_machines.logins}}}
- \item{}{\code{\link{virtual_machines.update}}}
- \item{}{\code{\link{workflows.create}}}
- \item{}{\code{\link{workflows.delete}}}
- \item{}{\code{\link{workflows.get}}}
- \item{}{\code{\link{workflows.list}}}
- \item{}{\code{\link{workflows.update}}}
-}
-}
-
-\examples{
-\dontrun{
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-
-collection <- arv$collections.get("uuid")
-
-collectionList <- arv$collections.list(list(list("name", "like", "Test\%")))
-collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test\%")))
-
-deletedCollection <- arv$collections.delete("uuid")
-
-updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
- "uuid")
-
-createdCollection <- arv$collections.create(list(name = "Example",
- description = "This is a test collection"))
-}
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{Arvados}
+\alias{Arvados}
+\title{R6 Class Representing an Arvados}
+\description{
+Arvados class gives users the ability to access the Arvados REST API. It also allows the user to manipulate collections and projects.
+}
+\examples{
+
+## ------------------------------------------------
+## Method `Arvados$new`
+## ------------------------------------------------
+
+arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
+
+## ------------------------------------------------
+## Method `Arvados$project_exist`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_exist(uuid = "projectUUID")
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_get`
+## ------------------------------------------------
+
+\dontrun{
+project <- arv$project_get(uuid = 'projectUUID')
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_create`
+## ------------------------------------------------
+
+\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+new_project <- arv$project_create(name = "project name", description = "project description", owner_uuid = "project UUID", properties = NULL, ensureUniqueName = "false")
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_properties_set`
+## ------------------------------------------------
+
+\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+arv$project_properties_set(Properties, uuid)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_properties_append`
+## ------------------------------------------------
+
+\dontrun{
+newProperties <- list() # should contain a list of new properties to be added
+arv$project_properties_append(properties = newProperties, uuid)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_properties_get`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_properties_get(projectUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_properties_delete`
+## ------------------------------------------------
+
+\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+arv$project_properties_delete(Properties, projectUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_update`
+## ------------------------------------------------
+
+\dontrun{
+newProperties <- list() # should contain a list of new properties to be added
+arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_list`
+## ------------------------------------------------
+
+\dontrun{
+listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # Sample query which show projects within the project of a given UUID
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_delete`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_delete(uuid = 'projectUUID')
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_get`
+## ------------------------------------------------
+
+\dontrun{
+collection <- arv$collections_get(uuid = collectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_create`
+## ------------------------------------------------
+
+\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_update`
+## ------------------------------------------------
+
+\dontrun{
+collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_delete`
+## ------------------------------------------------
+
+\dontrun{
+arv$collections_delete(collectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_provenance`
+## ------------------------------------------------
+
+\dontrun{
+collection <- arv$collections_provenance(collectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_trash`
+## ------------------------------------------------
+
+\dontrun{
+arv$collections_trash(collectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_untrash`
+## ------------------------------------------------
+
+\dontrun{
+arv$collections_untrash(collectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$collections_list`
+## ------------------------------------------------
+
+\dontrun{
+collectionList <- arv$collections_list(list(list("name", "=", "Example")))
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_permission_give`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_permission_refuse`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_permission_update`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
+}
+
+## ------------------------------------------------
+## Method `Arvados$project_permission_check`
+## ------------------------------------------------
+
+\dontrun{
+arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-Arvados-new}{\code{Arvados$new()}}
+\item \href{#method-Arvados-project_exist}{\code{Arvados$project_exist()}}
+\item \href{#method-Arvados-project_get}{\code{Arvados$project_get()}}
+\item \href{#method-Arvados-project_create}{\code{Arvados$project_create()}}
+\item \href{#method-Arvados-project_properties_set}{\code{Arvados$project_properties_set()}}
+\item \href{#method-Arvados-project_properties_append}{\code{Arvados$project_properties_append()}}
+\item \href{#method-Arvados-project_properties_get}{\code{Arvados$project_properties_get()}}
+\item \href{#method-Arvados-project_properties_delete}{\code{Arvados$project_properties_delete()}}
+\item \href{#method-Arvados-project_update}{\code{Arvados$project_update()}}
+\item \href{#method-Arvados-project_list}{\code{Arvados$project_list()}}
+\item \href{#method-Arvados-project_delete}{\code{Arvados$project_delete()}}
+\item \href{#method-Arvados-api_clients_get}{\code{Arvados$api_clients_get()}}
+\item \href{#method-Arvados-api_clients_create}{\code{Arvados$api_clients_create()}}
+\item \href{#method-Arvados-api_clients_update}{\code{Arvados$api_clients_update()}}
+\item \href{#method-Arvados-api_clients_delete}{\code{Arvados$api_clients_delete()}}
+\item \href{#method-Arvados-api_clients_list}{\code{Arvados$api_clients_list()}}
+\item \href{#method-Arvados-api_client_authorizations_get}{\code{Arvados$api_client_authorizations_get()}}
+\item \href{#method-Arvados-api_client_authorizations_create}{\code{Arvados$api_client_authorizations_create()}}
+\item \href{#method-Arvados-api_client_authorizations_update}{\code{Arvados$api_client_authorizations_update()}}
+\item \href{#method-Arvados-api_client_authorizations_delete}{\code{Arvados$api_client_authorizations_delete()}}
+\item \href{#method-Arvados-api_client_authorizations_create_system_auth}{\code{Arvados$api_client_authorizations_create_system_auth()}}
+\item \href{#method-Arvados-api_client_authorizations_current}{\code{Arvados$api_client_authorizations_current()}}
+\item \href{#method-Arvados-api_client_authorizations_list}{\code{Arvados$api_client_authorizations_list()}}
+\item \href{#method-Arvados-authorized_keys_get}{\code{Arvados$authorized_keys_get()}}
+\item \href{#method-Arvados-authorized_keys_create}{\code{Arvados$authorized_keys_create()}}
+\item \href{#method-Arvados-authorized_keys_update}{\code{Arvados$authorized_keys_update()}}
+\item \href{#method-Arvados-authorized_keys_delete}{\code{Arvados$authorized_keys_delete()}}
+\item \href{#method-Arvados-authorized_keys_list}{\code{Arvados$authorized_keys_list()}}
+\item \href{#method-Arvados-collections_get}{\code{Arvados$collections_get()}}
+\item \href{#method-Arvados-collections_create}{\code{Arvados$collections_create()}}
+\item \href{#method-Arvados-collections_update}{\code{Arvados$collections_update()}}
+\item \href{#method-Arvados-collections_delete}{\code{Arvados$collections_delete()}}
+\item \href{#method-Arvados-collections_provenance}{\code{Arvados$collections_provenance()}}
+\item \href{#method-Arvados-collections_used_by}{\code{Arvados$collections_used_by()}}
+\item \href{#method-Arvados-collections_trash}{\code{Arvados$collections_trash()}}
+\item \href{#method-Arvados-collections_untrash}{\code{Arvados$collections_untrash()}}
+\item \href{#method-Arvados-collections_list}{\code{Arvados$collections_list()}}
+\item \href{#method-Arvados-containers_get}{\code{Arvados$containers_get()}}
+\item \href{#method-Arvados-containers_create}{\code{Arvados$containers_create()}}
+\item \href{#method-Arvados-containers_update}{\code{Arvados$containers_update()}}
+\item \href{#method-Arvados-containers_delete}{\code{Arvados$containers_delete()}}
+\item \href{#method-Arvados-containers_auth}{\code{Arvados$containers_auth()}}
+\item \href{#method-Arvados-containers_lock}{\code{Arvados$containers_lock()}}
+\item \href{#method-Arvados-containers_unlock}{\code{Arvados$containers_unlock()}}
+\item \href{#method-Arvados-containers_secret_mounts}{\code{Arvados$containers_secret_mounts()}}
+\item \href{#method-Arvados-containers_current}{\code{Arvados$containers_current()}}
+\item \href{#method-Arvados-containers_list}{\code{Arvados$containers_list()}}
+\item \href{#method-Arvados-container_requests_get}{\code{Arvados$container_requests_get()}}
+\item \href{#method-Arvados-container_requests_create}{\code{Arvados$container_requests_create()}}
+\item \href{#method-Arvados-container_requests_update}{\code{Arvados$container_requests_update()}}
+\item \href{#method-Arvados-container_requests_delete}{\code{Arvados$container_requests_delete()}}
+\item \href{#method-Arvados-container_requests_list}{\code{Arvados$container_requests_list()}}
+\item \href{#method-Arvados-groups_get}{\code{Arvados$groups_get()}}
+\item \href{#method-Arvados-groups_create}{\code{Arvados$groups_create()}}
+\item \href{#method-Arvados-groups_update}{\code{Arvados$groups_update()}}
+\item \href{#method-Arvados-groups_delete}{\code{Arvados$groups_delete()}}
+\item \href{#method-Arvados-groups_contents}{\code{Arvados$groups_contents()}}
+\item \href{#method-Arvados-groups_shared}{\code{Arvados$groups_shared()}}
+\item \href{#method-Arvados-groups_trash}{\code{Arvados$groups_trash()}}
+\item \href{#method-Arvados-groups_untrash}{\code{Arvados$groups_untrash()}}
+\item \href{#method-Arvados-groups_list}{\code{Arvados$groups_list()}}
+\item \href{#method-Arvados-keep_services_get}{\code{Arvados$keep_services_get()}}
+\item \href{#method-Arvados-keep_services_create}{\code{Arvados$keep_services_create()}}
+\item \href{#method-Arvados-keep_services_update}{\code{Arvados$keep_services_update()}}
+\item \href{#method-Arvados-keep_services_delete}{\code{Arvados$keep_services_delete()}}
+\item \href{#method-Arvados-keep_services_accessible}{\code{Arvados$keep_services_accessible()}}
+\item \href{#method-Arvados-keep_services_list}{\code{Arvados$keep_services_list()}}
+\item \href{#method-Arvados-project_permission_give}{\code{Arvados$project_permission_give()}}
+\item \href{#method-Arvados-project_permission_refuse}{\code{Arvados$project_permission_refuse()}}
+\item \href{#method-Arvados-project_permission_update}{\code{Arvados$project_permission_update()}}
+\item \href{#method-Arvados-project_permission_check}{\code{Arvados$project_permission_check()}}
+\item \href{#method-Arvados-links_get}{\code{Arvados$links_get()}}
+\item \href{#method-Arvados-links_create}{\code{Arvados$links_create()}}
+\item \href{#method-Arvados-links_update}{\code{Arvados$links_update()}}
+\item \href{#method-Arvados-links_delete}{\code{Arvados$links_delete()}}
+\item \href{#method-Arvados-links_list}{\code{Arvados$links_list()}}
+\item \href{#method-Arvados-links_get_permissions}{\code{Arvados$links_get_permissions()}}
+\item \href{#method-Arvados-logs_get}{\code{Arvados$logs_get()}}
+\item \href{#method-Arvados-logs_create}{\code{Arvados$logs_create()}}
+\item \href{#method-Arvados-logs_update}{\code{Arvados$logs_update()}}
+\item \href{#method-Arvados-logs_delete}{\code{Arvados$logs_delete()}}
+\item \href{#method-Arvados-logs_list}{\code{Arvados$logs_list()}}
+\item \href{#method-Arvados-users_get}{\code{Arvados$users_get()}}
+\item \href{#method-Arvados-users_create}{\code{Arvados$users_create()}}
+\item \href{#method-Arvados-users_update}{\code{Arvados$users_update()}}
+\item \href{#method-Arvados-users_delete}{\code{Arvados$users_delete()}}
+\item \href{#method-Arvados-users_current}{\code{Arvados$users_current()}}
+\item \href{#method-Arvados-users_system}{\code{Arvados$users_system()}}
+\item \href{#method-Arvados-users_activate}{\code{Arvados$users_activate()}}
+\item \href{#method-Arvados-users_setup}{\code{Arvados$users_setup()}}
+\item \href{#method-Arvados-users_unsetup}{\code{Arvados$users_unsetup()}}
+\item \href{#method-Arvados-users_merge}{\code{Arvados$users_merge()}}
+\item \href{#method-Arvados-users_list}{\code{Arvados$users_list()}}
+\item \href{#method-Arvados-repositories_get}{\code{Arvados$repositories_get()}}
+\item \href{#method-Arvados-repositories_create}{\code{Arvados$repositories_create()}}
+\item \href{#method-Arvados-repositories_update}{\code{Arvados$repositories_update()}}
+\item \href{#method-Arvados-repositories_delete}{\code{Arvados$repositories_delete()}}
+\item \href{#method-Arvados-repositories_get_all_permissions}{\code{Arvados$repositories_get_all_permissions()}}
+\item \href{#method-Arvados-repositories_list}{\code{Arvados$repositories_list()}}
+\item \href{#method-Arvados-virtual_machines_get}{\code{Arvados$virtual_machines_get()}}
+\item \href{#method-Arvados-virtual_machines_create}{\code{Arvados$virtual_machines_create()}}
+\item \href{#method-Arvados-virtual_machines_update}{\code{Arvados$virtual_machines_update()}}
+\item \href{#method-Arvados-virtual_machines_delete}{\code{Arvados$virtual_machines_delete()}}
+\item \href{#method-Arvados-virtual_machines_logins}{\code{Arvados$virtual_machines_logins()}}
+\item \href{#method-Arvados-virtual_machines_get_all_logins}{\code{Arvados$virtual_machines_get_all_logins()}}
+\item \href{#method-Arvados-virtual_machines_list}{\code{Arvados$virtual_machines_list()}}
+\item \href{#method-Arvados-workflows_get}{\code{Arvados$workflows_get()}}
+\item \href{#method-Arvados-workflows_create}{\code{Arvados$workflows_create()}}
+\item \href{#method-Arvados-workflows_update}{\code{Arvados$workflows_update()}}
+\item \href{#method-Arvados-workflows_delete}{\code{Arvados$workflows_delete()}}
+\item \href{#method-Arvados-workflows_list}{\code{Arvados$workflows_list()}}
+\item \href{#method-Arvados-user_agreements_get}{\code{Arvados$user_agreements_get()}}
+\item \href{#method-Arvados-user_agreements_create}{\code{Arvados$user_agreements_create()}}
+\item \href{#method-Arvados-user_agreements_update}{\code{Arvados$user_agreements_update()}}
+\item \href{#method-Arvados-user_agreements_delete}{\code{Arvados$user_agreements_delete()}}
+\item \href{#method-Arvados-user_agreements_signatures}{\code{Arvados$user_agreements_signatures()}}
+\item \href{#method-Arvados-user_agreements_sign}{\code{Arvados$user_agreements_sign()}}
+\item \href{#method-Arvados-user_agreements_list}{\code{Arvados$user_agreements_list()}}
+\item \href{#method-Arvados-user_agreements_new}{\code{Arvados$user_agreements_new()}}
+\item \href{#method-Arvados-configs_get}{\code{Arvados$configs_get()}}
+\item \href{#method-Arvados-getHostName}{\code{Arvados$getHostName()}}
+\item \href{#method-Arvados-getToken}{\code{Arvados$getToken()}}
+\item \href{#method-Arvados-setRESTService}{\code{Arvados$setRESTService()}}
+\item \href{#method-Arvados-getRESTService}{\code{Arvados$getRESTService()}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-new}{}}}
+\subsection{Method \code{new()}}{
+Initialize a new environment.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{authToken}}{ARVADOS_API_TOKEN from 'Get API Token' on Arvados.}
+
+\item{\code{hostName}}{ARVADOS_API_HOST from 'Get API Token' on Arvados.}
+
+\item{\code{numRetries}}{Specify number of times to retry failed service requests.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+A new `Arvados` object.
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{arv <- Arvados$new(authToken = "ARVADOS_API_TOKEN", hostName = "ARVADOS_API_HOST", numRetries = 3)
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_exist}{}}}
+\subsection{Method \code{project_exist()}}{
+project_exist enables checking whether a project with the given UUID exists.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_exist(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of a project or a file.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_exist(uuid = "projectUUID")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_get}{}}}
+\subsection{Method \code{project_get()}}{
+project_get returns the demanded project.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+project <- arv$project_get(uuid = 'projectUUID')
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_create}{}}}
+\subsection{Method \code{project_create()}}{
+project_create creates a new project of a given name and description.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_create(
+ name,
+ description,
+ ownerUUID,
+ properties = NULL,
+ ensureUniqueName = "false"
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the project.}
+
+\item{\code{description}}{Description of the project.}
+
+\item{\code{ownerUUID}}{The UUID of the parent project in which the new project will be created.}
+
+\item{\code{properties}}{List of the properties of the project.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+new_project <- arv$project_create(name = "project name", description = "project description", ownerUUID = "project UUID", properties = NULL, ensureUniqueName = "false")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_properties_set}{}}}
+\subsection{Method \code{project_properties_set()}}{
+project_properties_set is a method defined in Arvados class that enables setting properties. It sets the given properties, overwriting any that are already set.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_properties_set(listProperties, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{listProperties}}{List of new properties.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+arv$project_properties_set(Properties, uuid)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_properties_append}{}}}
+\subsection{Method \code{project_properties_append()}}{
+project_properties_append is a method defined in Arvados class that enables appending properties. It allows adding new properties.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_properties_append(properties, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{properties}}{List of new properties.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+newProperties <- list() # should contain a list of new properties to be added
+arv$project_properties_append(properties = newProperties, uuid)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_properties_get}{}}}
+\subsection{Method \code{project_properties_get()}}{
+project_properties_get is a method defined in Arvados class that returns properties.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_properties_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of a project or a file.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_properties_get(projectUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_properties_delete}{}}}
+\subsection{Method \code{project_properties_delete()}}{
+project_properties_delete is a method defined in Arvados class that deletes list of properties.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_properties_delete(oneProp, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{oneProp}}{Property to be deleted.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+Properties <- list() # should contain a list of properties to be deleted
+arv$project_properties_delete(Properties, projectUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_update}{}}}
+\subsection{Method \code{project_update()}}{
+project_update enables updating project. New name, description and properties may be given.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_update(..., uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{...}}{Feature to be updated (name, description, properties).}
+
+\item{\code{uuid}}{The UUID of a project in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+newProperties <- list() # should contain a list of new properties to be added
+arv$project_update(name = "new project name", properties = newProperties, uuid = projectUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_list}{}}}
+\subsection{Method \code{project_list()}}{
+project_list enables listing project by its name, uuid, properties, permissions.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ includeTrash = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
+
+\item{\code{uuid}}{The UUID of a project in question.}
+
+\item{\code{recursive}}{Include contents from child groups recursively.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+listOfprojects <- arv$project_list(list(list("owner_uuid", "=", projectUUID))) # Sample query which shows projects within the project of a given UUID
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_delete}{}}}
+\subsection{Method \code{project_delete()}}{
+project_delete trashes project of a given uuid. It can be restored from trash or deleted permanently.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_delete(uuid = 'projectUUID')
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_clients_get}{}}}
+\subsection{Method \code{api_clients_get()}}{
+api_clients_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_clients_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the apiClient in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_clients_create}{}}}
+\subsection{Method \code{api_clients_create()}}{
+api_clients_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_clients_create(
+ apiClient,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{apiClient}}{apiClient object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_clients_update}{}}}
+\subsection{Method \code{api_clients_update()}}{
+api_clients_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_clients_update(apiClient, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{apiClient}}{apiClient object.}
+
+\item{\code{uuid}}{The UUID of the apiClient in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_clients_delete}{}}}
+\subsection{Method \code{api_clients_delete()}}{
+api_clients_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_clients_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the apiClient in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_clients_list}{}}}
+\subsection{Method \code{api_clients_list()}}{
+api_clients_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_clients_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_get}{}}}
+\subsection{Method \code{api_client_authorizations_get()}}{
+api_client_authorizations_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_create}{}}}
+\subsection{Method \code{api_client_authorizations_create()}}{
+api_client_authorizations_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_create(
+ apiClientAuthorization,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{apiClientAuthorization}}{apiClientAuthorization object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error on (ownerUUID, name) collision.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_update}{}}}
+\subsection{Method \code{api_client_authorizations_update()}}{
+api_client_authorizations_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_update(apiClientAuthorization, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{apiClientAuthorization}}{apiClientAuthorization object.}
+
+\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_delete}{}}}
+\subsection{Method \code{api_client_authorizations_delete()}}{
+api_client_authorizations_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the apiClientAuthorization in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_create_system_auth}{}}}
+\subsection{Method \code{api_client_authorizations_create_system_auth()}}{
+api_client_authorizations_create_system_auth is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_create_system_auth(
+ apiClientID = NULL,
+ scopes = NULL
+)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_current}{}}}
+\subsection{Method \code{api_client_authorizations_current()}}{
+api_client_authorizations_current is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_current()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-api_client_authorizations_list}{}}}
+\subsection{Method \code{api_client_authorizations_list()}}{
+api_client_authorizations_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$api_client_authorizations_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_get}{}}}
+\subsection{Method \code{authorized_keys_get()}}{
+authorized_keys_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$authorized_keys_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the authorizedKey in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_create}{}}}
+\subsection{Method \code{authorized_keys_create()}}{
+authorized_keys_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$authorized_keys_create(
+ authorizedKey,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{authorizedKey}}{authorizedKey object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_update}{}}}
+\subsection{Method \code{authorized_keys_update()}}{
+authorized_keys_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$authorized_keys_update(authorizedKey, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{authorizedKey}}{authorizedKey object.}
+
+\item{\code{uuid}}{The UUID of the authorizedKey in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_delete}{}}}
+\subsection{Method \code{authorized_keys_delete()}}{
+authorized_keys_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$authorized_keys_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the authorizedKey in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-authorized_keys_list}{}}}
+\subsection{Method \code{authorized_keys_list()}}{
+authorized_keys_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$authorized_keys_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_get}{}}}
+\subsection{Method \code{collections_get()}}{
+collections_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- arv$collections_get(uuid = collectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_create}{}}}
+\subsection{Method \code{collections_create()}}{
+collections_create is a method defined in Arvados class that enables collections creation.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_create(
+ name,
+ description,
+ ownerUUID = NULL,
+ properties = NULL,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the collection.}
+
+\item{\code{description}}{Description of the collection.}
+
+\item{\code{ownerUUID}}{The UUID of the parent project in which the new collection will be created.}
+
+\item{\code{properties}}{Properties of the collection.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+Properties <- list() # should contain a list of new properties to be added
+arv$collections_create(name = "collectionTitle", description = "collectionDescription", ownerUUID = "collectionOwner", properties = Properties)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_update}{}}}
+\subsection{Method \code{collections_update()}}{
+collections_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_update(
+ name,
+ description,
+ ownerUUID = NULL,
+ properties = NULL,
+ uuid
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{New name of the collection.}
+
+\item{\code{description}}{New description of the collection.}
+
+\item{\code{ownerUUID}}{The UUID of the parent project of the collection.}
+
+\item{\code{properties}}{New list of properties of the collection.}
+
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- arv$collections_update(name = "newCollectionTitle", description = "newCollectionDescription", ownerUUID = "collectionOwner", properties = NULL, uuid = "collectionUUID")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_delete}{}}}
+\subsection{Method \code{collections_delete()}}{
+collections_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$collections_delete(collectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_provenance}{}}}
+\subsection{Method \code{collections_provenance()}}{
+collections_provenance is a method defined in Arvados class, it returns the collection by uuid.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_provenance(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- arv$collections_provenance(collectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_used_by}{}}}
+\subsection{Method \code{collections_used_by()}}{
+collections_used_by is a method defined in Arvados class, it returns collection by portable_data_hash.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_used_by(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_trash}{}}}
+\subsection{Method \code{collections_trash()}}{
+collections_trash is a method defined in Arvados class, it moves collection to trash.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_trash(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$collections_trash(collectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_untrash}{}}}
+\subsection{Method \code{collections_untrash()}}{
+collections_untrash is a method defined in Arvados class, it moves collection from trash to project.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_untrash(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Collection in question.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$collections_untrash(collectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-collections_list}{}}}
+\subsection{Method \code{collections_list()}}{
+collections_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$collections_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL,
+ includeTrash = NULL,
+ includeOldVersions = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+
+\item{\code{includeTrash}}{Include collections whose is_trashed attribute is true.}
+
+\item{\code{includeOldVersions}}{Include past collection versions.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collectionList <- arv$collections_list(list(list("name", "=", "Example")))
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_get}{}}}
+\subsection{Method \code{containers_get()}}{
+containers_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_create}{}}}
+\subsection{Method \code{containers_create()}}{
+containers_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_create(
+ container,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{container}}{Container object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_update}{}}}
+\subsection{Method \code{containers_update()}}{
+containers_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_update(container, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{container}}{Container object.}
+
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_delete}{}}}
+\subsection{Method \code{containers_delete()}}{
+containers_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_auth}{}}}
+\subsection{Method \code{containers_auth()}}{
+containers_auth is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_auth(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_lock}{}}}
+\subsection{Method \code{containers_lock()}}{
+containers_lock is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_lock(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_unlock}{}}}
+\subsection{Method \code{containers_unlock()}}{
+containers_unlock is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_unlock(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_secret_mounts}{}}}
+\subsection{Method \code{containers_secret_mounts()}}{
+containers_secret_mounts is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_secret_mounts(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Container in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_current}{}}}
+\subsection{Method \code{containers_current()}}{
+containers_current is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_current()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-containers_list}{}}}
+\subsection{Method \code{containers_list()}}{
+containers_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$containers_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-container_requests_get}{}}}
+\subsection{Method \code{container_requests_get()}}{
+container_requests_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$container_requests_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the containerRequest in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-container_requests_create}{}}}
+\subsection{Method \code{container_requests_create()}}{
+container_requests_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$container_requests_create(
+ containerRequest,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{containerRequest}}{containerRequest object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-container_requests_update}{}}}
+\subsection{Method \code{container_requests_update()}}{
+container_requests_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$container_requests_update(containerRequest, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{containerRequest}}{containerRequest object.}
+
+\item{\code{uuid}}{The UUID of the containerRequest in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-container_requests_delete}{}}}
+\subsection{Method \code{container_requests_delete()}}{
+container_requests_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$container_requests_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the containerRequest in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-container_requests_list}{}}}
+\subsection{Method \code{container_requests_list()}}{
+container_requests_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$container_requests_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL,
+ includeTrash = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+
+\item{\code{includeTrash}}{Include container requests whose owner project is trashed.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_get}{}}}
+\subsection{Method \code{groups_get()}}{
+groups_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_create}{}}}
+\subsection{Method \code{groups_create()}}{
+groups_create is a method defined in Arvados class that supports project creation.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_create(
+ group,
+ ensureUniqueName = "false",
+ clusterID = NULL,
+ async = "false"
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{group}}{Group object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+
+\item{\code{async}}{Defer permissions update.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_update}{}}}
+\subsection{Method \code{groups_update()}}{
+groups_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_update(group, uuid, async = "false")}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{group}}{Group object.}
+
+\item{\code{uuid}}{The UUID of the Group in question.}
+
+\item{\code{async}}{Defer permissions update.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_delete}{}}}
+\subsection{Method \code{groups_delete()}}{
+groups_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_contents}{}}}
+\subsection{Method \code{groups_contents()}}{
+groups_contents is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_contents(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL,
+ includeTrash = NULL,
+ uuid = NULL,
+ recursive = NULL,
+ include = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+
+\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
+
+\item{\code{recursive}}{Include contents from child groups recursively.}
+
+\item{\code{include}}{Include objects referred to by listed field in "included" (only ownerUUID).}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_shared}{}}}
+\subsection{Method \code{groups_shared()}}{
+groups_shared is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_shared(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL,
+ includeTrash = NULL,
+ include = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+
+\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_trash}{}}}
+\subsection{Method \code{groups_trash()}}{
+groups_trash is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_trash(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_untrash}{}}}
+\subsection{Method \code{groups_untrash()}}{
+groups_untrash is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_untrash(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Group in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-groups_list}{}}}
+\subsection{Method \code{groups_list()}}{
+groups_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$groups_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL,
+ includeTrash = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+
+\item{\code{includeTrash}}{Include items whose is_trashed attribute is true.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_get}{}}}
+\subsection{Method \code{keep_services_get()}}{
+keep_services_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the keepService in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_create}{}}}
+\subsection{Method \code{keep_services_create()}}{
+keep_services_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_create(
+ keepService,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{keepService}}{keepService object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_update}{}}}
+\subsection{Method \code{keep_services_update()}}{
+keep_services_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_update(keepService, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{keepService}}{keepService object.}
+
+\item{\code{uuid}}{The UUID of the keepService in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_delete}{}}}
+\subsection{Method \code{keep_services_delete()}}{
+keep_services_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the keepService in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_accessible}{}}}
+\subsection{Method \code{keep_services_accessible()}}{
+keep_services_accessible is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_accessible()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-keep_services_list}{}}}
+\subsection{Method \code{keep_services_list()}}{
+keep_services_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$keep_services_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_permission_give}{}}}
+\subsection{Method \code{project_permission_give()}}{
+project_permission_give is a method defined in Arvados class that enables sharing files with other users.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_permission_give(type, uuid, user)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{type}}{Possible options are can_read or can_write or can_manage.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+
+\item{\code{user}}{The UUID of the person that gets the permission.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_permission_give(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_permission_refuse}{}}}
+\subsection{Method \code{project_permission_refuse()}}{
+project_permission_refuse is a method defined in Arvados class that disables sharing files with other users.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_permission_refuse(type, uuid, user)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{type}}{Possible options are can_read or can_write or can_manage.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+
+\item{\code{user}}{The UUID of the person that the permission is taken from.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_permission_refuse(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_permission_update}{}}}
+\subsection{Method \code{project_permission_update()}}{
+project_permission_update is a method defined in Arvados class that enables updating permissions.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_permission_update(typeOld, typeNew, uuid, user)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{typeOld}}{Old option.}
+
+\item{\code{typeNew}}{New option like can_read or can_write or can_manage.}
+
+\item{\code{uuid}}{The UUID of a project or a file.}
+
+\item{\code{user}}{The UUID of the person whose permission is being updated.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_permission_update(typeOld = "can_read", typeNew = "can_write", uuid = objectUUID, user = userUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-project_permission_check}{}}}
+\subsection{Method \code{project_permission_check()}}{
+project_permission_check is a method defined in Arvados class that enables checking file permissions.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$project_permission_check(uuid, user, type = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of a project or a file.}
+
+\item{\code{user}}{The UUID of the person whose permission is being checked.}
+
+\item{\code{type}}{Possible options are can_read or can_write or can_manage.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arv$project_permission_check(type = "can_read", uuid = objectUUID, user = userUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_get}{}}}
+\subsection{Method \code{links_get()}}{
+links_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Link in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_create}{}}}
+\subsection{Method \code{links_create()}}{
+links_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_create(link, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{link}}{Link object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_update}{}}}
+\subsection{Method \code{links_update()}}{
+links_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_update(link, uuid, async = "false")}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{link}}{Link object.}
+
+\item{\code{uuid}}{The UUID of the Link in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_delete}{}}}
+\subsection{Method \code{links_delete()}}{
+links_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Link in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_list}{}}}
+\subsection{Method \code{links_list()}}{
+links_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-links_get_permissions}{}}}
+\subsection{Method \code{links_get_permissions()}}{
+links_get_permissions is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$links_get_permissions(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Link in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-logs_get}{}}}
+\subsection{Method \code{logs_get()}}{
+logs_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$logs_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Log in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-logs_create}{}}}
+\subsection{Method \code{logs_create()}}{
+logs_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$logs_create(log, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{log}}{Log object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-logs_update}{}}}
+\subsection{Method \code{logs_update()}}{
+logs_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$logs_update(log, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{log}}{Log object.}
+
+\item{\code{uuid}}{The UUID of the Log in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-logs_delete}{}}}
+\subsection{Method \code{logs_delete()}}{
+logs_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$logs_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Log in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-logs_list}{}}}
+\subsection{Method \code{logs_list()}}{
+logs_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$logs_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_get}{}}}
+\subsection{Method \code{users_get()}}{
+users_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the User in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_create}{}}}
+\subsection{Method \code{users_create()}}{
+users_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_create(user, ensureUniqueName = "false", clusterID = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{user}}{User object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_update}{}}}
+\subsection{Method \code{users_update()}}{
+users_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_update(user, uuid, bypassFederation = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{user}}{User object.}
+
+\item{\code{uuid}}{The UUID of the User in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_delete}{}}}
+\subsection{Method \code{users_delete()}}{
+users_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the User in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_current}{}}}
+\subsection{Method \code{users_current()}}{
+users_current is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_current()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_system}{}}}
+\subsection{Method \code{users_system()}}{
+users_system is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_system()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_activate}{}}}
+\subsection{Method \code{users_activate()}}{
+users_activate is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_activate(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the User in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_setup}{}}}
+\subsection{Method \code{users_setup()}}{
+users_setup is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_setup(
+ uuid = NULL,
+ user = NULL,
+ repo_name = NULL,
+ vm_uuid = NULL,
+ send_notification_email = "false"
+)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_unsetup}{}}}
+\subsection{Method \code{users_unsetup()}}{
+users_unsetup is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_unsetup(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the User in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_merge}{}}}
+\subsection{Method \code{users_merge()}}{
+users_merge is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_merge(
+ newOwnerUUID,
+ newUserToken = NULL,
+ redirectToNewUser = NULL,
+ oldUserUUID = NULL,
+ newUserUUID = NULL
+)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-users_list}{}}}
+\subsection{Method \code{users_list()}}{
+users_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$users_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_get}{}}}
+\subsection{Method \code{repositories_get()}}{
+repositories_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Repository in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_create}{}}}
+\subsection{Method \code{repositories_create()}}{
+repositories_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_create(
+ repository,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{repository}}{Repository object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_update}{}}}
+\subsection{Method \code{repositories_update()}}{
+repositories_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_update(repository, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{repository}}{Repository object.}
+
+\item{\code{uuid}}{The UUID of the Repository in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_delete}{}}}
+\subsection{Method \code{repositories_delete()}}{
+repositories_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Repository in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_get_all_permissions}{}}}
+\subsection{Method \code{repositories_get_all_permissions()}}{
+repositories_get_all_permissions is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_get_all_permissions()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-repositories_list}{}}}
+\subsection{Method \code{repositories_list()}}{
+repositories_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$repositories_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_get}{}}}
+\subsection{Method \code{virtual_machines_get()}}{
+virtual_machines_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the virtualMachine in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_create}{}}}
+\subsection{Method \code{virtual_machines_create()}}{
+virtual_machines_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_create(
+ virtualMachine,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{virtualMachine}}{virtualMachine object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_update}{}}}
+\subsection{Method \code{virtual_machines_update()}}{
+virtual_machines_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_update(virtualMachine, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{virtualMachine}}{virtualMachine object.}
+
+\item{\code{uuid}}{The UUID of the virtualMachine in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_delete}{}}}
+\subsection{Method \code{virtual_machines_delete()}}{
+virtual_machines_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the virtualMachine in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_logins}{}}}
+\subsection{Method \code{virtual_machines_logins()}}{
+virtual_machines_logins is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_logins(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the virtualMachine in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_get_all_logins}{}}}
+\subsection{Method \code{virtual_machines_get_all_logins()}}{
+virtual_machines_get_all_logins is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_get_all_logins()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-virtual_machines_list}{}}}
+\subsection{Method \code{virtual_machines_list()}}{
+virtual_machines_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$virtual_machines_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-workflows_get}{}}}
+\subsection{Method \code{workflows_get()}}{
+workflows_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$workflows_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Workflow in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-workflows_create}{}}}
+\subsection{Method \code{workflows_create()}}{
+workflows_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$workflows_create(
+ workflow,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{workflow}}{Workflow object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-workflows_update}{}}}
+\subsection{Method \code{workflows_update()}}{
+workflows_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$workflows_update(workflow, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{workflow}}{Workflow object.}
+
+\item{\code{uuid}}{The UUID of the Workflow in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-workflows_delete}{}}}
+\subsection{Method \code{workflows_delete()}}{
+workflows_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$workflows_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the Workflow in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-workflows_list}{}}}
+\subsection{Method \code{workflows_list()}}{
+workflows_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$workflows_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_get}{}}}
+\subsection{Method \code{user_agreements_get()}}{
+user_agreements_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_get(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the userAgreement in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_create}{}}}
+\subsection{Method \code{user_agreements_create()}}{
+user_agreements_create is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_create(
+ userAgreement,
+ ensureUniqueName = "false",
+ clusterID = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{userAgreement}}{userAgreement object.}
+
+\item{\code{ensureUniqueName}}{Adjust name to ensure uniqueness instead of returning an error.}
+
+\item{\code{clusterID}}{Create object on a remote federated cluster instead of the current one.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_update}{}}}
+\subsection{Method \code{user_agreements_update()}}{
+user_agreements_update is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_update(userAgreement, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{userAgreement}}{userAgreement object.}
+
+\item{\code{uuid}}{The UUID of the userAgreement in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_delete}{}}}
+\subsection{Method \code{user_agreements_delete()}}{
+user_agreements_delete is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_delete(uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{The UUID of the userAgreement in question.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_signatures}{}}}
+\subsection{Method \code{user_agreements_signatures()}}{
+user_agreements_signatures is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_signatures()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_sign}{}}}
+\subsection{Method \code{user_agreements_sign()}}{
+user_agreements_sign is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_sign()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_list}{}}}
+\subsection{Method \code{user_agreements_list()}}{
+user_agreements_list is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_list(
+ filters = NULL,
+ where = NULL,
+ order = NULL,
+ select = NULL,
+ distinct = NULL,
+ limit = "100",
+ offset = "0",
+ count = "exact",
+ clusterID = NULL,
+ bypassFederation = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{clusterID}}{List objects on a remote federated cluster instead of the current one.}
+
+\item{\code{bypassFederation}}{Bypass federation behavior, list items from local instance database only.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-user_agreements_new}{}}}
+\subsection{Method \code{user_agreements_new()}}{
+user_agreements_new is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$user_agreements_new()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-configs_get}{}}}
+\subsection{Method \code{configs_get()}}{
+configs_get is a method defined in Arvados class.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$configs_get()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-getHostName}{}}}
+\subsection{Method \code{getHostName()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$getHostName()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-getToken}{}}}
+\subsection{Method \code{getToken()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$getToken()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-setRESTService}{}}}
+\subsection{Method \code{setRESTService()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$setRESTService(newREST)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Arvados-getRESTService}{}}}
+\subsection{Method \code{getRESTService()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Arvados$getRESTService()}\if{html}{\out{
}}
+}
+
+}
+}
diff --git a/sdk/R/man/ArvadosFile.Rd b/sdk/R/man/ArvadosFile.Rd
index 514e9e846d..81c25af5f1 100644
--- a/sdk/R/man/ArvadosFile.Rd
+++ b/sdk/R/man/ArvadosFile.Rd
@@ -1,58 +1,475 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/ArvadosFile.R
-\name{ArvadosFile}
-\alias{ArvadosFile}
-\title{ArvadosFile}
-\description{
-ArvadosFile class represents a file inside Arvados collection.
-}
-\section{Usage}{
-
-\preformatted{file = ArvadosFile$new(name)}
-}
-
-\section{Arguments}{
-
-\describe{
- \item{name}{Name of the file.}
-}
-}
-
-\section{Methods}{
-
-\describe{
- \item{getName()}{Returns name of the file.}
- \item{getRelativePath()}{Returns file path relative to the root.}
- \item{read(contentType = "raw", offset = 0, length = 0)}{Read file content.}
- \item{write(content, contentType = "text/html")}{Write to file (override current content of the file).}
- \item{connection(rw)}{Get connection opened in "read" or "write" mode.}
- \item{flush()}{Write connections content to a file (override current content of the file).}
- \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
- \item{getSizeInBytes()}{Returns file size in bytes.}
- \item{move(destination)}{Moves file to a new location inside collection.}
- \item{copy(destination)}{Copies file to a new location inside collection.}
-}
-}
-
-\examples{
-\dontrun{
-myFile <- ArvadosFile$new("myFile")
-
-myFile$write("This is new file content")
-fileContent <- myFile$read()
-fileContent <- myFile$read("text")
-fileContent <- myFile$read("raw", offset = 8, length = 4)
-
-#Write a table:
-arvConnection <- myFile$connection("w")
-write.table(mytable, arvConnection)
-arvadosFile$flush()
-
-#Read a table:
-arvConnection <- myFile$connection("r")
-mytable <- read.table(arvConnection)
-
-myFile$move("newFolder/myFile")
-myFile$copy("newFolder/myFile")
-}
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{ArvadosFile}
+\alias{ArvadosFile}
+\title{R6 Class Representing an ArvadosFile}
+\description{
+ArvadosFile class represents a file inside Arvados collection.
+}
+\examples{
+
+## ------------------------------------------------
+## Method `ArvadosFile$new`
+## ------------------------------------------------
+
+\dontrun{
+myFile <- ArvadosFile$new("myFile")
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$getName`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile$getName()
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$getFileListing`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile$getFileListing()
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$getSizeInBytes`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile$getSizeInBytes()
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$read`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+fileContent <- arvadosFile$read("text")
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$connection`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+arvConnection <- arvadosFile$connection("w")
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$flush`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+myFile$write("This is new file content")
+arvadosFile$flush()
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$write`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+myFile$write("This is new file content")
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$move`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile$move(newPath)
+}
+
+## ------------------------------------------------
+## Method `ArvadosFile$copy`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile$copy("NewName.format")
+}
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-ArvadosFile-new}{\code{ArvadosFile$new()}}
+\item \href{#method-ArvadosFile-getName}{\code{ArvadosFile$getName()}}
+\item \href{#method-ArvadosFile-getFileListing}{\code{ArvadosFile$getFileListing()}}
+\item \href{#method-ArvadosFile-getSizeInBytes}{\code{ArvadosFile$getSizeInBytes()}}
+\item \href{#method-ArvadosFile-get}{\code{ArvadosFile$get()}}
+\item \href{#method-ArvadosFile-getFirst}{\code{ArvadosFile$getFirst()}}
+\item \href{#method-ArvadosFile-getCollection}{\code{ArvadosFile$getCollection()}}
+\item \href{#method-ArvadosFile-setCollection}{\code{ArvadosFile$setCollection()}}
+\item \href{#method-ArvadosFile-getRelativePath}{\code{ArvadosFile$getRelativePath()}}
+\item \href{#method-ArvadosFile-getParent}{\code{ArvadosFile$getParent()}}
+\item \href{#method-ArvadosFile-setParent}{\code{ArvadosFile$setParent()}}
+\item \href{#method-ArvadosFile-read}{\code{ArvadosFile$read()}}
+\item \href{#method-ArvadosFile-connection}{\code{ArvadosFile$connection()}}
+\item \href{#method-ArvadosFile-flush}{\code{ArvadosFile$flush()}}
+\item \href{#method-ArvadosFile-write}{\code{ArvadosFile$write()}}
+\item \href{#method-ArvadosFile-move}{\code{ArvadosFile$move()}}
+\item \href{#method-ArvadosFile-copy}{\code{ArvadosFile$copy()}}
+\item \href{#method-ArvadosFile-duplicate}{\code{ArvadosFile$duplicate()}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-new}{}}}
+\subsection{Method \code{new()}}{
+Initialize new environment.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$new(name)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the new environment.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+A new `ArvadosFile` object.
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+myFile <- ArvadosFile$new("myFile")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getName}{}}}
+\subsection{Method \code{getName()}}{
+Returns name of the file.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getName()}\if{html}{\out{
}}
+}
+
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile$getName()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getFileListing}{}}}
+\subsection{Method \code{getFileListing()}}{
+Returns the collection's file content as a character vector.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getFileListing(fullpath = TRUE)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{fullpath}}{If TRUE, returns the file's full relative path.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile$getFileListing()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getSizeInBytes}{}}}
+\subsection{Method \code{getSizeInBytes()}}{
+Returns the collection's content size in bytes.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getSizeInBytes()}\if{html}{\out{
}}
+}
+
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile$getSizeInBytes()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-get}{}}}
+\subsection{Method \code{get()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$get(fileLikeObjectName)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getFirst}{}}}
+\subsection{Method \code{getFirst()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getFirst()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getCollection}{}}}
+\subsection{Method \code{getCollection()}}{
+Returns collection UUID.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getCollection()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-setCollection}{}}}
+\subsection{Method \code{setCollection()}}{
+Sets new collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$setCollection(collection, setRecursively = TRUE)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getRelativePath}{}}}
+\subsection{Method \code{getRelativePath()}}{
+Returns file path relative to the root.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getRelativePath()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-getParent}{}}}
+\subsection{Method \code{getParent()}}{
+Returns project UUID.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$getParent()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-setParent}{}}}
+\subsection{Method \code{setParent()}}{
+Sets project collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$setParent(newParent)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-read}{}}}
+\subsection{Method \code{read()}}{
+Read file content.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$read(contentType = "raw", offset = 0, length = 0)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{contentType}}{Type of content. Possible is "text", "raw".}
+
+\item{\code{offset}}{Describes the location of a piece of data compared to another location}
+
+\item{\code{length}}{Length of content}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+fileContent <- arvadosFile$read("text")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-connection}{}}}
+\subsection{Method \code{connection()}}{
+Get connection opened in "read" or "write" mode.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$connection(rw)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{rw}}{Type of connection.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+arvConnection <- arvadosFile$connection("w")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-flush}{}}}
+\subsection{Method \code{flush()}}{
+Write the connection's content to a file or override the current content of the file.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$flush()}\if{html}{\out{
}}
+}
+
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+myFile$write("This is new file content")
+arvadosFile$flush()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-write}{}}}
+\subsection{Method \code{write()}}{
+Write to file or override current content of the file.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$write(content, contentType = "text/html")}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{content}}{File to write.}
+
+\item{\code{contentType}}{Type of content. Possible is "text", "raw".}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+arvadosFile <- collection$get(fileName)
+myFile$write("This is new file content")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-move}{}}}
+\subsection{Method \code{move()}}{
+Moves file to a new location inside collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$move(destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{destination}}{Path to new folder.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile$move(newPath)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-copy}{}}}
+\subsection{Method \code{copy()}}{
+Copies file to a new location inside collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$copy(destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{destination}}{Path to new folder.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile$copy("NewName.format")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-ArvadosFile-duplicate}{}}}
+\subsection{Method \code{duplicate()}}{
+Duplicates the file and gives it a new name.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{ArvadosFile$duplicate(newName = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{newName}}{New name for duplicated file.}
+}
+\if{html}{\out{
}}
+}
+}
+}
diff --git a/sdk/R/man/ArvadosR.Rd b/sdk/R/man/ArvadosR.Rd
new file mode 100644
index 0000000000..51edb8b138
--- /dev/null
+++ b/sdk/R/man/ArvadosR.Rd
@@ -0,0 +1,23 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosR.R
+\name{ArvadosR}
+\alias{ArvadosR}
+\title{ArvadosR}
+\description{
+Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.
+}
+\seealso{
+\itemize{
+\item https://arvados.org
+\item https://doc.arvados.org/sdk/R/index.html
+\item https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R}
+}
+\author{
+\itemize{
+\item Lucas Di Pentima
+\item Ward Vandewege
+\item Fuad Muhic
+\item Peter Amstutz
+\item Aneta Stanczyk
+\item Piotr Nowosielski}
+}
diff --git a/sdk/R/man/Collection.Rd b/sdk/R/man/Collection.Rd
index fbe6038664..0de9a842e6 100644
--- a/sdk/R/man/Collection.Rd
+++ b/sdk/R/man/Collection.Rd
@@ -1,49 +1,480 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Collection.R
-\name{Collection}
-\alias{Collection}
-\title{Collection}
-\description{
-Collection class provides interface for working with Arvados collections.
-}
-\section{Usage}{
-
-\preformatted{collection = Collection$new(arv, uuid)}
-}
-
-\section{Arguments}{
-
-\describe{
- \item{arv}{Arvados object.}
- \item{uuid}{UUID of a collection.}
-}
-}
-
-\section{Methods}{
-
-\describe{
- \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
- \item{create(files)}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
- \item{remove(fileNames)}{Remove one or more files from the collection.}
- \item{move(content, destination)}{Moves ArvadosFile or Subcollection to another location in the collection.}
- \item{copy(content, destination)}{Copies ArvadosFile or Subcollection to another location in the collection.}
- \item{getFileListing()}{Returns collections file content as character vector.}
- \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
-}
-}
-
-\examples{
-\dontrun{
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-collection <- Collection$new(arv, "uuid")
-
-createdFiles <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
-
-collection$remove("location/to/my/file.cpp")
-
-collection$move("folder/file.cpp", "file.cpp")
-
-arvadosFile <- collection$get("location/to/my/file.cpp")
-arvadosSubcollection <- collection$get("location/to/my/directory/")
-}
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{Collection}
+\alias{Collection}
+\title{R6 Class Representing Arvados Collection}
+\description{
+Collection class provides an interface for working with Arvados collections;
+for example, actions like creating, updating, moving or removing are possible.
+}
+\examples{
+
+## ------------------------------------------------
+## Method `Collection$new`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, CollectionUUID)
+}
+
+## ------------------------------------------------
+## Method `Collection$readArvFile`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
+readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
+readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
+readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
+readFile <- collection$readArvFile(arvadosFile, Ncol= 4, Nrow = 32) # binary, only numbers
+readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text
+}
+
+## ------------------------------------------------
+## Method `Collection$writeFile`
+## ------------------------------------------------
+
+\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv
+writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv
+writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta
+writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table
+writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text
+writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary
+writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx
+}
+
+## ------------------------------------------------
+## Method `Collection$create`
+## ------------------------------------------------
+
+\dontrun{
+collection <- arv$collections_create(name = collectionTitle, description = collectionDescription, owner_uuid = collectionOwner, properties = list("ROX37196928443768648" = "ROX37742976443830153"))
+}
+
+## ------------------------------------------------
+## Method `Collection$remove`
+## ------------------------------------------------
+
+\dontrun{
+collection$remove(fileName.format)
+}
+
+## ------------------------------------------------
+## Method `Collection$move`
+## ------------------------------------------------
+
+\dontrun{
+collection$move("fileName.format", path)
+}
+
+## ------------------------------------------------
+## Method `Collection$copy`
+## ------------------------------------------------
+
+\dontrun{
+copied <- collection$copy("oldName.format", "newName.format")
+}
+
+## ------------------------------------------------
+## Method `Collection$refresh`
+## ------------------------------------------------
+
+\dontrun{
+collection$refresh()
+}
+
+## ------------------------------------------------
+## Method `Collection$getFileListing`
+## ------------------------------------------------
+
+\dontrun{
+list <- collection$getFileListing()
+}
+
+## ------------------------------------------------
+## Method `Collection$get`
+## ------------------------------------------------
+
+\dontrun{
+arvadosFile <- collection$get(fileName)
+}
+}
+\seealso{
+https://git.arvados.org/arvados.git/tree/HEAD:/sdk/R
+}
+\section{Public fields}{
+\if{html}{\out{}}
+\describe{
+\item{\code{uuid}}{Authentic Collection UUID.}
+}
+\if{html}{\out{
}}
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-Collection-new}{\code{Collection$new()}}
+\item \href{#method-Collection-add}{\code{Collection$add()}}
+\item \href{#method-Collection-readArvFile}{\code{Collection$readArvFile()}}
+\item \href{#method-Collection-writeFile}{\code{Collection$writeFile()}}
+\item \href{#method-Collection-create}{\code{Collection$create()}}
+\item \href{#method-Collection-remove}{\code{Collection$remove()}}
+\item \href{#method-Collection-move}{\code{Collection$move()}}
+\item \href{#method-Collection-copy}{\code{Collection$copy()}}
+\item \href{#method-Collection-refresh}{\code{Collection$refresh()}}
+\item \href{#method-Collection-getFileListing}{\code{Collection$getFileListing()}}
+\item \href{#method-Collection-get}{\code{Collection$get()}}
+\item \href{#method-Collection-getRESTService}{\code{Collection$getRESTService()}}
+\item \href{#method-Collection-setRESTService}{\code{Collection$setRESTService()}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-new}{}}}
+\subsection{Method \code{new()}}{
+Initializes a new environment.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$new(api, uuid)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{api}}{Arvados environment.}
+
+\item{\code{uuid}}{The authentic Collection UUID.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+A new `Collection` object.
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, CollectionUUID)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-add}{}}}
+\subsection{Method \code{add()}}{
+Adds ArvadosFile or Subcollection specified by content to the collection. Used only with ArvadosFile or Subcollection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$add(content, relativePath = "")}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{content}}{Content to be added.}
+
+\item{\code{relativePath}}{Path to add content.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-readArvFile}{}}}
+\subsection{Method \code{readArvFile()}}{
+Reads file content.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$readArvFile(
+ file,
+ con,
+ sep = ",",
+ istable = NULL,
+ fileclass = "SeqFastadna",
+ Ncol = NULL,
+ Nrow = NULL,
+ wantedFunction = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{file}}{Name of the file.}
+
+\item{\code{sep}}{Separator used in reading tsv, csv file format.}
+
+\item{\code{istable}}{Used in reading txt file to check if the file is table or not.}
+
+\item{\code{fileclass}}{Used in reading fasta file to set file class.}
+
+\item{\code{Ncol}}{Used in reading binary file to set numbers of columns in data.frame.}
+
+\item{\code{Nrow}}{Used in reading binary file to set numbers of rows in data.frame size.}
+
+\item{\code{con}}{Collection from which the file is read.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+readFile <- collection$readArvFile(arvadosFile, istable = 'yes') # table
+readFile <- collection$readArvFile(arvadosFile, istable = 'no') # text
+readFile <- collection$readArvFile(arvadosFile) # xlsx, csv, tsv, rds, rdata
+readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta') # fasta
+readFile <- collection$readArvFile(arvadosFile, Ncol= 4, Nrow = 32) # binary, only numbers
+readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = "factor") # binary with factor or text
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-writeFile}{}}}
+\subsection{Method \code{writeFile()}}{
+Writes file content.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$writeFile(
+ name,
+ file,
+ collectionUUID,
+ fileFormat,
+ istable = NULL,
+ seqName = NULL
+)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the file.}
+
+\item{\code{file}}{File to be saved.}
+
+\item{\code{istable}}{Used in writing txt file to check if the file is table or not.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- Collection$new(arv, collectionUUID)
+writeFile <- collection$writeFile(name = "myoutput.csv", file = file, fileFormat = "csv", istable = NULL, collectionUUID = collectionUUID) # csv
+writeFile <- collection$writeFile(name = "myoutput.tsv", file = file, fileFormat = "tsv", istable = NULL, collectionUUID = collectionUUID) # tsv
+writeFile <- collection$writeFile(name = "myoutput.fasta", file = file, fileFormat = "fasta", istable = NULL, collectionUUID = collectionUUID) # fasta
+writeFile <- collection$writeFile(name = "myoutputtable.txt", file = file, fileFormat = "txt", istable = "yes", collectionUUID = collectionUUID) # txt table
+writeFile <- collection$writeFile(name = "myoutputtext.txt", file = file, fileFormat = "txt", istable = "no", collectionUUID = collectionUUID) # txt text
+writeFile <- collection$writeFile(name = "myoutputbinary.dat", file = file, fileFormat = "dat", collectionUUID = collectionUUID) # binary
+writeFile <- collection$writeFile(name = "myoutputxlsx.xlsx", file = file, fileFormat = "xlsx", collectionUUID = collectionUUID) # xlsx
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-create}{}}}
+\subsection{Method \code{create()}}{
+Creates one or more ArvadosFiles and adds them to the collection at specified path.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$create(files)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{files}}{Content to be created.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection <- arv$collections_create(name = collectionTitle, description = collectionDescription, owner_uuid = collectionOwner, properties = list("ROX37196928443768648" = "ROX37742976443830153"))
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-remove}{}}}
+\subsection{Method \code{remove()}}{
+Remove one or more files from the collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$remove(paths)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{paths}}{Content to be removed.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection$remove(fileName.format)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-move}{}}}
+\subsection{Method \code{move()}}{
+Moves ArvadosFile or Subcollection to another location in the collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$move(content, destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{content}}{Content to be moved.}
+
+\item{\code{destination}}{Path to move content.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection$move("fileName.format", path)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-copy}{}}}
+\subsection{Method \code{copy()}}{
+Copies ArvadosFile or Subcollection to another location in the collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$copy(content, destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{content}}{Content to be copied.}
+
+\item{\code{destination}}{Path to copy content.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+copied <- collection$copy("oldName.format", "newName.format")
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-refresh}{}}}
+\subsection{Method \code{refresh()}}{
+Refreshes the environment.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$refresh()}\if{html}{\out{
}}
+}
+
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+collection$refresh()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-getFileListing}{}}}
+\subsection{Method \code{getFileListing()}}{
+Returns the collection's file content as a character vector.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$getFileListing()}\if{html}{\out{
}}
+}
+
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+list <- collection$getFileListing()
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-get}{}}}
+\subsection{Method \code{get()}}{
+If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$get(relativePath)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{relativePath}}{Path from content is taken.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Examples}{
+\if{html}{\out{}}
+\preformatted{\dontrun{
+arvadosFile <- collection$get(fileName)
+}
+}
+\if{html}{\out{
}}
+
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-getRESTService}{}}}
+\subsection{Method \code{getRESTService()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$getRESTService()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Collection-setRESTService}{}}}
+\subsection{Method \code{setRESTService()}}{
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Collection$setRESTService(newRESTService)}\if{html}{\out{
}}
+}
+
+}
+}
diff --git a/sdk/R/man/Subcollection.Rd b/sdk/R/man/Subcollection.Rd
index 0b27a8bc43..9faf0c279e 100644
--- a/sdk/R/man/Subcollection.Rd
+++ b/sdk/R/man/Subcollection.Rd
@@ -1,49 +1,250 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Subcollection.R
-\name{Subcollection}
-\alias{Subcollection}
-\title{Subcollection}
-\description{
-Subcollection class represents a folder inside Arvados collection.
-It is essentially a composite of arvadosFiles and other subcollections.
-}
-\section{Usage}{
-
-\preformatted{subcollection = Subcollection$new(name)}
-}
-
-\section{Arguments}{
-
-\describe{
- \item{name}{Name of the subcollection.}
-}
-}
-
-\section{Methods}{
-
-\describe{
- \item{getName()}{Returns name of the subcollection.}
- \item{getRelativePath()}{Returns subcollection path relative to the root.}
- \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the subcollection.}
- \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
- \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
- \item{getFileListing()}{Returns subcollections file content as character vector.}
- \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
- \item{move(destination)}{Moves subcollection to a new location inside collection.}
- \item{copy(destination)}{Copies subcollection to a new location inside collection.}
-}
-}
-
-\examples{
-\dontrun{
-myFolder <- Subcollection$new("myFolder")
-myFile <- ArvadosFile$new("myFile")
-
-myFolder$add(myFile)
-myFolder$get("myFile")
-myFolder$remove("myFile")
-
-myFolder$move("newLocation/myFolder")
-myFolder$copy("newLocation/myFolder")
-}
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{Subcollection}
+\alias{Subcollection}
+\title{R6 Class Representing a Subcollection}
+\description{
+Subcollection class represents a folder inside Arvados collection.
+It is essentially a composite of arvadosFiles and other subcollections.
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-Subcollection-new}{\code{Subcollection$new()}}
+\item \href{#method-Subcollection-getName}{\code{Subcollection$getName()}}
+\item \href{#method-Subcollection-getRelativePath}{\code{Subcollection$getRelativePath()}}
+\item \href{#method-Subcollection-add}{\code{Subcollection$add()}}
+\item \href{#method-Subcollection-remove}{\code{Subcollection$remove()}}
+\item \href{#method-Subcollection-getFileListing}{\code{Subcollection$getFileListing()}}
+\item \href{#method-Subcollection-getSizeInBytes}{\code{Subcollection$getSizeInBytes()}}
+\item \href{#method-Subcollection-move}{\code{Subcollection$move()}}
+\item \href{#method-Subcollection-copy}{\code{Subcollection$copy()}}
+\item \href{#method-Subcollection-duplicate}{\code{Subcollection$duplicate()}}
+\item \href{#method-Subcollection-get}{\code{Subcollection$get()}}
+\item \href{#method-Subcollection-getFirst}{\code{Subcollection$getFirst()}}
+\item \href{#method-Subcollection-setCollection}{\code{Subcollection$setCollection()}}
+\item \href{#method-Subcollection-getCollection}{\code{Subcollection$getCollection()}}
+\item \href{#method-Subcollection-getParent}{\code{Subcollection$getParent()}}
+\item \href{#method-Subcollection-setParent}{\code{Subcollection$setParent()}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-new}{}}}
+\subsection{Method \code{new()}}{
+Initializes a new environment.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$new(name)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the new environment.}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+A new `Subcollection` object.
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getName}{}}}
+\subsection{Method \code{getName()}}{
+Returns name of the file.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getName()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getRelativePath}{}}}
+\subsection{Method \code{getRelativePath()}}{
+Returns Subcollection's path relative to the root.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getRelativePath()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-add}{}}}
+\subsection{Method \code{add()}}{
+Adds ArvadosFile or Subcollection specified by content to the Subcollection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$add(content)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{content}}{Content to be added.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-remove}{}}}
+\subsection{Method \code{remove()}}{
+Removes ArvadosFile or Subcollection specified by name from the Subcollection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$remove(name)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the file to be removed.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getFileListing}{}}}
+\subsection{Method \code{getFileListing()}}{
+Returns the Subcollection's file content as a character vector.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getFileListing(fullPath = TRUE)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{fullPath}}{Checks if the path to the file exists.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getSizeInBytes}{}}}
+\subsection{Method \code{getSizeInBytes()}}{
+Returns the subcollection's content size in bytes.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getSizeInBytes()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-move}{}}}
+\subsection{Method \code{move()}}{
+Moves Subcollection to a new location inside collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$move(destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{destination}}{Path to move the file.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-copy}{}}}
+\subsection{Method \code{copy()}}{
+Copies Subcollection to a new location inside collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$copy(destination)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{destination}}{Path to copy the file.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-duplicate}{}}}
+\subsection{Method \code{duplicate()}}{
+Duplicates the Subcollection and gives it a new name.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$duplicate(newName = NULL)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{newName}}{New name for duplicated file.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-get}{}}}
+\subsection{Method \code{get()}}{
+If name is valid, returns ArvadosFile or Subcollection specified by name, else returns NULL.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$get(name)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{}}
+\describe{
+\item{\code{name}}{Name of the file.}
+}
+\if{html}{\out{
}}
+}
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getFirst}{}}}
+\subsection{Method \code{getFirst()}}{
+Returns files in Subcollection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getFirst()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-setCollection}{}}}
+\subsection{Method \code{setCollection()}}{
+Sets Collection by its UUID.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$setCollection(collection, setRecursively = TRUE)}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getCollection}{}}}
+\subsection{Method \code{getCollection()}}{
+Returns Collection of Subcollection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getCollection()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-getParent}{}}}
+\subsection{Method \code{getParent()}}{
+Returns Collection UUID.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$getParent()}\if{html}{\out{
}}
+}
+
+}
+\if{html}{\out{ }}
+\if{html}{\out{ }}
+\if{latex}{\out{\hypertarget{method-Subcollection-setParent}{}}}
+\subsection{Method \code{setParent()}}{
+Sets new Collection.
+\subsection{Usage}{
+\if{html}{\out{}}\preformatted{Subcollection$setParent(newParent)}\if{html}{\out{
}}
+}
+
+}
+}
diff --git a/sdk/R/man/api_client_authorizations.create.Rd b/sdk/R/man/api_client_authorizations.create.Rd
deleted file mode 100644
index e322419469..0000000000
--- a/sdk/R/man/api_client_authorizations.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.create}
-\alias{api_client_authorizations.create}
-\title{api_client_authorizations.create}
-\usage{
-arv$api_client_authorizations.create(apiclientauthorization,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{apiClientAuthorization}{ApiClientAuthorization object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.create_system_auth.Rd b/sdk/R/man/api_client_authorizations.create_system_auth.Rd
deleted file mode 100644
index 3eb172aabf..0000000000
--- a/sdk/R/man/api_client_authorizations.create_system_auth.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.create_system_auth}
-\alias{api_client_authorizations.create_system_auth}
-\title{api_client_authorizations.create_system_auth}
-\usage{
-arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
- scopes = NULL)
-}
-\arguments{
-\item{api_client_id}{}
-
-\item{scopes}{}
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.create_system_auth is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.current.Rd b/sdk/R/man/api_client_authorizations.current.Rd
deleted file mode 100644
index c28623788b..0000000000
--- a/sdk/R/man/api_client_authorizations.current.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.current}
-\alias{api_client_authorizations.current}
-\title{api_client_authorizations.current}
-\usage{
-arv$api_client_authorizations.current(NULL)
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.current is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.delete.Rd b/sdk/R/man/api_client_authorizations.delete.Rd
deleted file mode 100644
index 054cc79662..0000000000
--- a/sdk/R/man/api_client_authorizations.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.delete}
-\alias{api_client_authorizations.delete}
-\title{api_client_authorizations.delete}
-\usage{
-arv$api_client_authorizations.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ApiClientAuthorization in question.}
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.get.Rd b/sdk/R/man/api_client_authorizations.get.Rd
deleted file mode 100644
index 3f5b630a41..0000000000
--- a/sdk/R/man/api_client_authorizations.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.get}
-\alias{api_client_authorizations.get}
-\title{api_client_authorizations.get}
-\usage{
-arv$api_client_authorizations.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ApiClientAuthorization in question.}
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.list.Rd b/sdk/R/man/api_client_authorizations.list.Rd
deleted file mode 100644
index 7c8ae69cd5..0000000000
--- a/sdk/R/man/api_client_authorizations.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.list}
-\alias{api_client_authorizations.list}
-\title{api_client_authorizations.list}
-\usage{
-arv$api_client_authorizations.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-ApiClientAuthorizationList object.
-}
-\description{
-api_client_authorizations.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_client_authorizations.update.Rd b/sdk/R/man/api_client_authorizations.update.Rd
deleted file mode 100644
index e6380ccddc..0000000000
--- a/sdk/R/man/api_client_authorizations.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_client_authorizations.update}
-\alias{api_client_authorizations.update}
-\title{api_client_authorizations.update}
-\usage{
-arv$api_client_authorizations.update(apiclientauthorization,
- uuid)
-}
-\arguments{
-\item{apiClientAuthorization}{ApiClientAuthorization object.}
-
-\item{uuid}{The UUID of the ApiClientAuthorization in question.}
-}
-\value{
-ApiClientAuthorization object.
-}
-\description{
-api_client_authorizations.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_clients.create.Rd b/sdk/R/man/api_clients.create.Rd
deleted file mode 100644
index 260116897a..0000000000
--- a/sdk/R/man/api_clients.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_clients.create}
-\alias{api_clients.create}
-\title{api_clients.create}
-\usage{
-arv$api_clients.create(apiclient,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{apiClient}{ApiClient object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-ApiClient object.
-}
-\description{
-api_clients.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_clients.delete.Rd b/sdk/R/man/api_clients.delete.Rd
deleted file mode 100644
index 90eaa99115..0000000000
--- a/sdk/R/man/api_clients.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_clients.delete}
-\alias{api_clients.delete}
-\title{api_clients.delete}
-\usage{
-arv$api_clients.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ApiClient in question.}
-}
-\value{
-ApiClient object.
-}
-\description{
-api_clients.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_clients.get.Rd b/sdk/R/man/api_clients.get.Rd
deleted file mode 100644
index 4a1b6c06b1..0000000000
--- a/sdk/R/man/api_clients.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_clients.get}
-\alias{api_clients.get}
-\title{api_clients.get}
-\usage{
-arv$api_clients.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ApiClient in question.}
-}
-\value{
-ApiClient object.
-}
-\description{
-api_clients.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_clients.list.Rd b/sdk/R/man/api_clients.list.Rd
deleted file mode 100644
index 0679c9c12e..0000000000
--- a/sdk/R/man/api_clients.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_clients.list}
-\alias{api_clients.list}
-\title{api_clients.list}
-\usage{
-arv$api_clients.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-ApiClientList object.
-}
-\description{
-api_clients.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/api_clients.update.Rd b/sdk/R/man/api_clients.update.Rd
deleted file mode 100644
index a37e533677..0000000000
--- a/sdk/R/man/api_clients.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{api_clients.update}
-\alias{api_clients.update}
-\title{api_clients.update}
-\usage{
-arv$api_clients.update(apiclient,
- uuid)
-}
-\arguments{
-\item{apiClient}{ApiClient object.}
-
-\item{uuid}{The UUID of the ApiClient in question.}
-}
-\value{
-ApiClient object.
-}
-\description{
-api_clients.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/authorized_keys.create.Rd b/sdk/R/man/authorized_keys.create.Rd
deleted file mode 100644
index e0d226af3d..0000000000
--- a/sdk/R/man/authorized_keys.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{authorized_keys.create}
-\alias{authorized_keys.create}
-\title{authorized_keys.create}
-\usage{
-arv$authorized_keys.create(authorizedkey,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{authorizedKey}{AuthorizedKey object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-AuthorizedKey object.
-}
-\description{
-authorized_keys.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/authorized_keys.delete.Rd b/sdk/R/man/authorized_keys.delete.Rd
deleted file mode 100644
index db1f0e7c48..0000000000
--- a/sdk/R/man/authorized_keys.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{authorized_keys.delete}
-\alias{authorized_keys.delete}
-\title{authorized_keys.delete}
-\usage{
-arv$authorized_keys.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the AuthorizedKey in question.}
-}
-\value{
-AuthorizedKey object.
-}
-\description{
-authorized_keys.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/authorized_keys.get.Rd b/sdk/R/man/authorized_keys.get.Rd
deleted file mode 100644
index 31a2dd3f30..0000000000
--- a/sdk/R/man/authorized_keys.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{authorized_keys.get}
-\alias{authorized_keys.get}
-\title{authorized_keys.get}
-\usage{
-arv$authorized_keys.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the AuthorizedKey in question.}
-}
-\value{
-AuthorizedKey object.
-}
-\description{
-authorized_keys.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/authorized_keys.list.Rd b/sdk/R/man/authorized_keys.list.Rd
deleted file mode 100644
index cd19bc60c0..0000000000
--- a/sdk/R/man/authorized_keys.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{authorized_keys.list}
-\alias{authorized_keys.list}
-\title{authorized_keys.list}
-\usage{
-arv$authorized_keys.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-AuthorizedKeyList object.
-}
-\description{
-authorized_keys.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/authorized_keys.update.Rd b/sdk/R/man/authorized_keys.update.Rd
deleted file mode 100644
index 65d93d0732..0000000000
--- a/sdk/R/man/authorized_keys.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{authorized_keys.update}
-\alias{authorized_keys.update}
-\title{authorized_keys.update}
-\usage{
-arv$authorized_keys.update(authorizedkey,
- uuid)
-}
-\arguments{
-\item{authorizedKey}{AuthorizedKey object.}
-
-\item{uuid}{The UUID of the AuthorizedKey in question.}
-}
-\value{
-AuthorizedKey object.
-}
-\description{
-authorized_keys.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.create.Rd b/sdk/R/man/collections.create.Rd
deleted file mode 100644
index af8e3984b6..0000000000
--- a/sdk/R/man/collections.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.create}
-\alias{collections.create}
-\title{collections.create}
-\usage{
-arv$collections.create(collection,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{collection}{Collection object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Collection object.
-}
-\description{
-collections.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.delete.Rd b/sdk/R/man/collections.delete.Rd
deleted file mode 100644
index 28b3543e4a..0000000000
--- a/sdk/R/man/collections.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.delete}
-\alias{collections.delete}
-\title{collections.delete}
-\usage{
-arv$collections.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Collection in question.}
-}
-\value{
-Collection object.
-}
-\description{
-collections.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.get.Rd b/sdk/R/man/collections.get.Rd
deleted file mode 100644
index 3878aaf8c8..0000000000
--- a/sdk/R/man/collections.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.get}
-\alias{collections.get}
-\title{collections.get}
-\usage{
-arv$collections.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Collection in question.}
-}
-\value{
-Collection object.
-}
-\description{
-collections.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.list.Rd b/sdk/R/man/collections.list.Rd
deleted file mode 100644
index 87f6f78e33..0000000000
--- a/sdk/R/man/collections.list.Rd
+++ /dev/null
@@ -1,36 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.list}
-\alias{collections.list}
-\title{collections.list}
-\usage{
-arv$collections.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", include_trash = NULL)
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-
-\item{include_trash}{Include collections whose is_trashed attribute is true.}
-}
-\value{
-CollectionList object.
-}
-\description{
-collections.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.provenance.Rd b/sdk/R/man/collections.provenance.Rd
deleted file mode 100644
index 001a7b47c7..0000000000
--- a/sdk/R/man/collections.provenance.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.provenance}
-\alias{collections.provenance}
-\title{collections.provenance}
-\usage{
-arv$collections.provenance(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Collection object.
-}
-\description{
-collections.provenance is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.trash.Rd b/sdk/R/man/collections.trash.Rd
deleted file mode 100644
index 4862109b9f..0000000000
--- a/sdk/R/man/collections.trash.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.trash}
-\alias{collections.trash}
-\title{collections.trash}
-\usage{
-arv$collections.trash(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Collection object.
-}
-\description{
-collections.trash is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.untrash.Rd b/sdk/R/man/collections.untrash.Rd
deleted file mode 100644
index c41bc3d904..0000000000
--- a/sdk/R/man/collections.untrash.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.untrash}
-\alias{collections.untrash}
-\title{collections.untrash}
-\usage{
-arv$collections.untrash(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Collection object.
-}
-\description{
-collections.untrash is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.update.Rd b/sdk/R/man/collections.update.Rd
deleted file mode 100644
index c9b201cb4d..0000000000
--- a/sdk/R/man/collections.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.update}
-\alias{collections.update}
-\title{collections.update}
-\usage{
-arv$collections.update(collection,
- uuid)
-}
-\arguments{
-\item{collection}{Collection object.}
-
-\item{uuid}{The UUID of the Collection in question.}
-}
-\value{
-Collection object.
-}
-\description{
-collections.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/collections.used_by.Rd b/sdk/R/man/collections.used_by.Rd
deleted file mode 100644
index 53b8e493f7..0000000000
--- a/sdk/R/man/collections.used_by.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{collections.used_by}
-\alias{collections.used_by}
-\title{collections.used_by}
-\usage{
-arv$collections.used_by(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Collection object.
-}
-\description{
-collections.used_by is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/container_requests.create.Rd b/sdk/R/man/container_requests.create.Rd
deleted file mode 100644
index e114d322ed..0000000000
--- a/sdk/R/man/container_requests.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{container_requests.create}
-\alias{container_requests.create}
-\title{container_requests.create}
-\usage{
-arv$container_requests.create(containerrequest,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{containerRequest}{ContainerRequest object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-ContainerRequest object.
-}
-\description{
-container_requests.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/container_requests.delete.Rd b/sdk/R/man/container_requests.delete.Rd
deleted file mode 100644
index 905739bcd3..0000000000
--- a/sdk/R/man/container_requests.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{container_requests.delete}
-\alias{container_requests.delete}
-\title{container_requests.delete}
-\usage{
-arv$container_requests.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ContainerRequest in question.}
-}
-\value{
-ContainerRequest object.
-}
-\description{
-container_requests.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/container_requests.get.Rd b/sdk/R/man/container_requests.get.Rd
deleted file mode 100644
index 54fe5d4c18..0000000000
--- a/sdk/R/man/container_requests.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{container_requests.get}
-\alias{container_requests.get}
-\title{container_requests.get}
-\usage{
-arv$container_requests.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the ContainerRequest in question.}
-}
-\value{
-ContainerRequest object.
-}
-\description{
-container_requests.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/container_requests.list.Rd b/sdk/R/man/container_requests.list.Rd
deleted file mode 100644
index 9c2412beed..0000000000
--- a/sdk/R/man/container_requests.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{container_requests.list}
-\alias{container_requests.list}
-\title{container_requests.list}
-\usage{
-arv$container_requests.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-ContainerRequestList object.
-}
-\description{
-container_requests.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/container_requests.update.Rd b/sdk/R/man/container_requests.update.Rd
deleted file mode 100644
index 063417b3ed..0000000000
--- a/sdk/R/man/container_requests.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{container_requests.update}
-\alias{container_requests.update}
-\title{container_requests.update}
-\usage{
-arv$container_requests.update(containerrequest,
- uuid)
-}
-\arguments{
-\item{containerRequest}{ContainerRequest object.}
-
-\item{uuid}{The UUID of the ContainerRequest in question.}
-}
-\value{
-ContainerRequest object.
-}
-\description{
-container_requests.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.auth.Rd b/sdk/R/man/containers.auth.Rd
deleted file mode 100644
index a594d2f196..0000000000
--- a/sdk/R/man/containers.auth.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.auth}
-\alias{containers.auth}
-\title{containers.auth}
-\usage{
-arv$containers.auth(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Container object.
-}
-\description{
-containers.auth is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.create.Rd b/sdk/R/man/containers.create.Rd
deleted file mode 100644
index 4ce25bb831..0000000000
--- a/sdk/R/man/containers.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.create}
-\alias{containers.create}
-\title{containers.create}
-\usage{
-arv$containers.create(container,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{container}{Container object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Container object.
-}
-\description{
-containers.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.current.Rd b/sdk/R/man/containers.current.Rd
deleted file mode 100644
index 0f6ad4eb96..0000000000
--- a/sdk/R/man/containers.current.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.current}
-\alias{containers.current}
-\title{containers.current}
-\usage{
-arv$containers.current(NULL)
-}
-\value{
-Container object.
-}
-\description{
-containers.current is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.delete.Rd b/sdk/R/man/containers.delete.Rd
deleted file mode 100644
index e2e3cd7fc4..0000000000
--- a/sdk/R/man/containers.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.delete}
-\alias{containers.delete}
-\title{containers.delete}
-\usage{
-arv$containers.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Container in question.}
-}
-\value{
-Container object.
-}
-\description{
-containers.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.get.Rd b/sdk/R/man/containers.get.Rd
deleted file mode 100644
index 05d97d3d91..0000000000
--- a/sdk/R/man/containers.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.get}
-\alias{containers.get}
-\title{containers.get}
-\usage{
-arv$containers.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Container in question.}
-}
-\value{
-Container object.
-}
-\description{
-containers.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.list.Rd b/sdk/R/man/containers.list.Rd
deleted file mode 100644
index d445796135..0000000000
--- a/sdk/R/man/containers.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.list}
-\alias{containers.list}
-\title{containers.list}
-\usage{
-arv$containers.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-ContainerList object.
-}
-\description{
-containers.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.lock.Rd b/sdk/R/man/containers.lock.Rd
deleted file mode 100644
index 72bcdf0f81..0000000000
--- a/sdk/R/man/containers.lock.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.lock}
-\alias{containers.lock}
-\title{containers.lock}
-\usage{
-arv$containers.lock(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Container object.
-}
-\description{
-containers.lock is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.secret_mounts.Rd b/sdk/R/man/containers.secret_mounts.Rd
deleted file mode 100644
index d0f8444fa4..0000000000
--- a/sdk/R/man/containers.secret_mounts.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.secret_mounts}
-\alias{containers.secret_mounts}
-\title{containers.secret_mounts}
-\usage{
-arv$containers.secret_mounts(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Container object.
-}
-\description{
-containers.secret_mounts is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.unlock.Rd b/sdk/R/man/containers.unlock.Rd
deleted file mode 100644
index 5c41f2059a..0000000000
--- a/sdk/R/man/containers.unlock.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.unlock}
-\alias{containers.unlock}
-\title{containers.unlock}
-\usage{
-arv$containers.unlock(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Container object.
-}
-\description{
-containers.unlock is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/containers.update.Rd b/sdk/R/man/containers.update.Rd
deleted file mode 100644
index 3a8572670e..0000000000
--- a/sdk/R/man/containers.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{containers.update}
-\alias{containers.update}
-\title{containers.update}
-\usage{
-arv$containers.update(container,
- uuid)
-}
-\arguments{
-\item{container}{Container object.}
-
-\item{uuid}{The UUID of the Container in question.}
-}
-\value{
-Container object.
-}
-\description{
-containers.update is a method defined in Arvados class.
-}
diff --git a/apps/workbench/app/assets/images/dax.png b/sdk/R/man/figures/dax.png
similarity index 100%
rename from apps/workbench/app/assets/images/dax.png
rename to sdk/R/man/figures/dax.png
diff --git a/sdk/R/man/groups.contents.Rd b/sdk/R/man/groups.contents.Rd
deleted file mode 100644
index 26647df4f7..0000000000
--- a/sdk/R/man/groups.contents.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.contents}
-\alias{groups.contents}
-\title{groups.contents}
-\usage{
-arv$groups.contents(filters = NULL,
- where = NULL, order = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- include_trash = NULL, uuid = NULL, recursive = NULL)
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-
-\item{include_trash}{Include items whose is_trashed attribute is true.}
-
-\item{uuid}{}
-
-\item{recursive}{Include contents from child groups recursively.}
-}
-\value{
-Group object.
-}
-\description{
-groups.contents is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.create.Rd b/sdk/R/man/groups.create.Rd
deleted file mode 100644
index 8719603629..0000000000
--- a/sdk/R/man/groups.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.create}
-\alias{groups.create}
-\title{groups.create}
-\usage{
-arv$groups.create(group, ensure_unique_name = "false")
-}
-\arguments{
-\item{group}{Group object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Group object.
-}
-\description{
-groups.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.delete.Rd b/sdk/R/man/groups.delete.Rd
deleted file mode 100644
index 1b4a0d9e62..0000000000
--- a/sdk/R/man/groups.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.delete}
-\alias{groups.delete}
-\title{groups.delete}
-\usage{
-arv$groups.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-groups.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.get.Rd b/sdk/R/man/groups.get.Rd
deleted file mode 100644
index 28a1872595..0000000000
--- a/sdk/R/man/groups.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.get}
-\alias{groups.get}
-\title{groups.get}
-\usage{
-arv$groups.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-groups.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.list.Rd b/sdk/R/man/groups.list.Rd
deleted file mode 100644
index 7699f3ef4b..0000000000
--- a/sdk/R/man/groups.list.Rd
+++ /dev/null
@@ -1,36 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.list}
-\alias{groups.list}
-\title{groups.list}
-\usage{
-arv$groups.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact", include_trash = NULL)
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-
-\item{include_trash}{Include items whose is_trashed attribute is true.}
-}
-\value{
-GroupList object.
-}
-\description{
-groups.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.trash.Rd b/sdk/R/man/groups.trash.Rd
deleted file mode 100644
index c529618f71..0000000000
--- a/sdk/R/man/groups.trash.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.trash}
-\alias{groups.trash}
-\title{groups.trash}
-\usage{
-arv$groups.trash(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Group object.
-}
-\description{
-groups.trash is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.untrash.Rd b/sdk/R/man/groups.untrash.Rd
deleted file mode 100644
index 014190c8fe..0000000000
--- a/sdk/R/man/groups.untrash.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.untrash}
-\alias{groups.untrash}
-\title{groups.untrash}
-\usage{
-arv$groups.untrash(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Group object.
-}
-\description{
-groups.untrash is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/groups.update.Rd b/sdk/R/man/groups.update.Rd
deleted file mode 100644
index 47abde7a9a..0000000000
--- a/sdk/R/man/groups.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{groups.update}
-\alias{groups.update}
-\title{groups.update}
-\usage{
-arv$groups.update(group, uuid)
-}
-\arguments{
-\item{group}{Group object.}
-
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-groups.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/humans.create.Rd b/sdk/R/man/humans.create.Rd
deleted file mode 100644
index 44c9aa3082..0000000000
--- a/sdk/R/man/humans.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{humans.create}
-\alias{humans.create}
-\title{humans.create}
-\usage{
-arv$humans.create(human, ensure_unique_name = "false")
-}
-\arguments{
-\item{human}{Human object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Human object.
-}
-\description{
-humans.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/humans.delete.Rd b/sdk/R/man/humans.delete.Rd
deleted file mode 100644
index ae66b8edaf..0000000000
--- a/sdk/R/man/humans.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{humans.delete}
-\alias{humans.delete}
-\title{humans.delete}
-\usage{
-arv$humans.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Human in question.}
-}
-\value{
-Human object.
-}
-\description{
-humans.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/humans.get.Rd b/sdk/R/man/humans.get.Rd
deleted file mode 100644
index 820c562c39..0000000000
--- a/sdk/R/man/humans.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{humans.get}
-\alias{humans.get}
-\title{humans.get}
-\usage{
-arv$humans.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Human in question.}
-}
-\value{
-Human object.
-}
-\description{
-humans.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/humans.list.Rd b/sdk/R/man/humans.list.Rd
deleted file mode 100644
index a8db4c7f69..0000000000
--- a/sdk/R/man/humans.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{humans.list}
-\alias{humans.list}
-\title{humans.list}
-\usage{
-arv$humans.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-HumanList object.
-}
-\description{
-humans.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/humans.update.Rd b/sdk/R/man/humans.update.Rd
deleted file mode 100644
index 83956a25e5..0000000000
--- a/sdk/R/man/humans.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{humans.update}
-\alias{humans.update}
-\title{humans.update}
-\usage{
-arv$humans.update(human, uuid)
-}
-\arguments{
-\item{human}{Human object.}
-
-\item{uuid}{The UUID of the Human in question.}
-}
-\value{
-Human object.
-}
-\description{
-humans.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/job_tasks.create.Rd b/sdk/R/man/job_tasks.create.Rd
deleted file mode 100644
index 2da0b0c56f..0000000000
--- a/sdk/R/man/job_tasks.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{job_tasks.create}
-\alias{job_tasks.create}
-\title{job_tasks.create}
-\usage{
-arv$job_tasks.create(jobtask, ensure_unique_name = "false")
-}
-\arguments{
-\item{jobTask}{JobTask object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-JobTask object.
-}
-\description{
-job_tasks.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/job_tasks.delete.Rd b/sdk/R/man/job_tasks.delete.Rd
deleted file mode 100644
index b78a38e8d2..0000000000
--- a/sdk/R/man/job_tasks.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{job_tasks.delete}
-\alias{job_tasks.delete}
-\title{job_tasks.delete}
-\usage{
-arv$job_tasks.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the JobTask in question.}
-}
-\value{
-JobTask object.
-}
-\description{
-job_tasks.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/job_tasks.get.Rd b/sdk/R/man/job_tasks.get.Rd
deleted file mode 100644
index 07d2054148..0000000000
--- a/sdk/R/man/job_tasks.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{job_tasks.get}
-\alias{job_tasks.get}
-\title{job_tasks.get}
-\usage{
-arv$job_tasks.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the JobTask in question.}
-}
-\value{
-JobTask object.
-}
-\description{
-job_tasks.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/job_tasks.list.Rd b/sdk/R/man/job_tasks.list.Rd
deleted file mode 100644
index 51c4b4942c..0000000000
--- a/sdk/R/man/job_tasks.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{job_tasks.list}
-\alias{job_tasks.list}
-\title{job_tasks.list}
-\usage{
-arv$job_tasks.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-JobTaskList object.
-}
-\description{
-job_tasks.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/job_tasks.update.Rd b/sdk/R/man/job_tasks.update.Rd
deleted file mode 100644
index 42d10bd43f..0000000000
--- a/sdk/R/man/job_tasks.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{job_tasks.update}
-\alias{job_tasks.update}
-\title{job_tasks.update}
-\usage{
-arv$job_tasks.update(jobtask, uuid)
-}
-\arguments{
-\item{jobTask}{JobTask object.}
-
-\item{uuid}{The UUID of the JobTask in question.}
-}
-\value{
-JobTask object.
-}
-\description{
-job_tasks.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.cancel.Rd b/sdk/R/man/jobs.cancel.Rd
deleted file mode 100644
index 7399d28a8d..0000000000
--- a/sdk/R/man/jobs.cancel.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.cancel}
-\alias{jobs.cancel}
-\title{jobs.cancel}
-\usage{
-arv$jobs.cancel(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Job object.
-}
-\description{
-jobs.cancel is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.create.Rd b/sdk/R/man/jobs.create.Rd
deleted file mode 100644
index 4c4d61ae76..0000000000
--- a/sdk/R/man/jobs.create.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.create}
-\alias{jobs.create}
-\title{jobs.create}
-\usage{
-arv$jobs.create(job, ensure_unique_name = "false",
- find_or_create = "false", filters = NULL,
- minimum_script_version = NULL, exclude_script_versions = NULL)
-}
-\arguments{
-\item{job}{Job object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-
-\item{find_or_create}{}
-
-\item{filters}{}
-
-\item{minimum_script_version}{}
-
-\item{exclude_script_versions}{}
-}
-\value{
-Job object.
-}
-\description{
-jobs.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.delete.Rd b/sdk/R/man/jobs.delete.Rd
deleted file mode 100644
index 7f756084a9..0000000000
--- a/sdk/R/man/jobs.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.delete}
-\alias{jobs.delete}
-\title{jobs.delete}
-\usage{
-arv$jobs.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Job in question.}
-}
-\value{
-Job object.
-}
-\description{
-jobs.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.get.Rd b/sdk/R/man/jobs.get.Rd
deleted file mode 100644
index 072b613e1c..0000000000
--- a/sdk/R/man/jobs.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.get}
-\alias{jobs.get}
-\title{jobs.get}
-\usage{
-arv$jobs.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Job in question.}
-}
-\value{
-Job object.
-}
-\description{
-jobs.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.list.Rd b/sdk/R/man/jobs.list.Rd
deleted file mode 100644
index 53055f5878..0000000000
--- a/sdk/R/man/jobs.list.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.list}
-\alias{jobs.list}
-\title{jobs.list}
-\usage{
-arv$jobs.list(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-JobList object.
-}
-\description{
-jobs.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.lock.Rd b/sdk/R/man/jobs.lock.Rd
deleted file mode 100644
index 3c2e232809..0000000000
--- a/sdk/R/man/jobs.lock.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.lock}
-\alias{jobs.lock}
-\title{jobs.lock}
-\usage{
-arv$jobs.lock(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Job object.
-}
-\description{
-jobs.lock is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.queue.Rd b/sdk/R/man/jobs.queue.Rd
deleted file mode 100644
index a9deaa971c..0000000000
--- a/sdk/R/man/jobs.queue.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.queue}
-\alias{jobs.queue}
-\title{jobs.queue}
-\usage{
-arv$jobs.queue(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-Job object.
-}
-\description{
-jobs.queue is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.queue_size.Rd b/sdk/R/man/jobs.queue_size.Rd
deleted file mode 100644
index 21858204b5..0000000000
--- a/sdk/R/man/jobs.queue_size.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.queue_size}
-\alias{jobs.queue_size}
-\title{jobs.queue_size}
-\usage{
-arv$jobs.queue_size(NULL)
-}
-\value{
-Job object.
-}
-\description{
-jobs.queue_size is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/jobs.update.Rd b/sdk/R/man/jobs.update.Rd
deleted file mode 100644
index 666d7fd7f1..0000000000
--- a/sdk/R/man/jobs.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{jobs.update}
-\alias{jobs.update}
-\title{jobs.update}
-\usage{
-arv$jobs.update(job, uuid)
-}
-\arguments{
-\item{job}{Job object.}
-
-\item{uuid}{The UUID of the Job in question.}
-}
-\value{
-Job object.
-}
-\description{
-jobs.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.create.Rd b/sdk/R/man/keep_disks.create.Rd
deleted file mode 100644
index 524c5b6423..0000000000
--- a/sdk/R/man/keep_disks.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.create}
-\alias{keep_disks.create}
-\title{keep_disks.create}
-\usage{
-arv$keep_disks.create(keepdisk,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{keepDisk}{KeepDisk object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-KeepDisk object.
-}
-\description{
-keep_disks.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.delete.Rd b/sdk/R/man/keep_disks.delete.Rd
deleted file mode 100644
index 80f39f3144..0000000000
--- a/sdk/R/man/keep_disks.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.delete}
-\alias{keep_disks.delete}
-\title{keep_disks.delete}
-\usage{
-arv$keep_disks.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the KeepDisk in question.}
-}
-\value{
-KeepDisk object.
-}
-\description{
-keep_disks.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.get.Rd b/sdk/R/man/keep_disks.get.Rd
deleted file mode 100644
index 1b511fe1f8..0000000000
--- a/sdk/R/man/keep_disks.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.get}
-\alias{keep_disks.get}
-\title{keep_disks.get}
-\usage{
-arv$keep_disks.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the KeepDisk in question.}
-}
-\value{
-KeepDisk object.
-}
-\description{
-keep_disks.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.list.Rd b/sdk/R/man/keep_disks.list.Rd
deleted file mode 100644
index fdb599f70f..0000000000
--- a/sdk/R/man/keep_disks.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.list}
-\alias{keep_disks.list}
-\title{keep_disks.list}
-\usage{
-arv$keep_disks.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-KeepDiskList object.
-}
-\description{
-keep_disks.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.ping.Rd b/sdk/R/man/keep_disks.ping.Rd
deleted file mode 100644
index 6ae55955f6..0000000000
--- a/sdk/R/man/keep_disks.ping.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.ping}
-\alias{keep_disks.ping}
-\title{keep_disks.ping}
-\usage{
-arv$keep_disks.ping(uuid = NULL,
- ping_secret, node_uuid = NULL, filesystem_uuid = NULL,
- service_host = NULL, service_port, service_ssl_flag)
-}
-\arguments{
-\item{uuid}{}
-
-\item{ping_secret}{}
-
-\item{node_uuid}{}
-
-\item{filesystem_uuid}{}
-
-\item{service_host}{}
-
-\item{service_port}{}
-
-\item{service_ssl_flag}{}
-}
-\value{
-KeepDisk object.
-}
-\description{
-keep_disks.ping is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_disks.update.Rd b/sdk/R/man/keep_disks.update.Rd
deleted file mode 100644
index 1ca3363cec..0000000000
--- a/sdk/R/man/keep_disks.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_disks.update}
-\alias{keep_disks.update}
-\title{keep_disks.update}
-\usage{
-arv$keep_disks.update(keepdisk,
- uuid)
-}
-\arguments{
-\item{keepDisk}{KeepDisk object.}
-
-\item{uuid}{The UUID of the KeepDisk in question.}
-}
-\value{
-KeepDisk object.
-}
-\description{
-keep_disks.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.accessible.Rd b/sdk/R/man/keep_services.accessible.Rd
deleted file mode 100644
index 3caae2f24c..0000000000
--- a/sdk/R/man/keep_services.accessible.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.accessible}
-\alias{keep_services.accessible}
-\title{keep_services.accessible}
-\usage{
-arv$keep_services.accessible(NULL)
-}
-\value{
-KeepService object.
-}
-\description{
-keep_services.accessible is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.create.Rd b/sdk/R/man/keep_services.create.Rd
deleted file mode 100644
index 59c43ab82c..0000000000
--- a/sdk/R/man/keep_services.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.create}
-\alias{keep_services.create}
-\title{keep_services.create}
-\usage{
-arv$keep_services.create(keepservice,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{keepService}{KeepService object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-KeepService object.
-}
-\description{
-keep_services.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.delete.Rd b/sdk/R/man/keep_services.delete.Rd
deleted file mode 100644
index 726771e797..0000000000
--- a/sdk/R/man/keep_services.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.delete}
-\alias{keep_services.delete}
-\title{keep_services.delete}
-\usage{
-arv$keep_services.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the KeepService in question.}
-}
-\value{
-KeepService object.
-}
-\description{
-keep_services.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.get.Rd b/sdk/R/man/keep_services.get.Rd
deleted file mode 100644
index 065cf84657..0000000000
--- a/sdk/R/man/keep_services.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.get}
-\alias{keep_services.get}
-\title{keep_services.get}
-\usage{
-arv$keep_services.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the KeepService in question.}
-}
-\value{
-KeepService object.
-}
-\description{
-keep_services.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.list.Rd b/sdk/R/man/keep_services.list.Rd
deleted file mode 100644
index 22aa3aa107..0000000000
--- a/sdk/R/man/keep_services.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.list}
-\alias{keep_services.list}
-\title{keep_services.list}
-\usage{
-arv$keep_services.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-KeepServiceList object.
-}
-\description{
-keep_services.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/keep_services.update.Rd b/sdk/R/man/keep_services.update.Rd
deleted file mode 100644
index 2680a5c762..0000000000
--- a/sdk/R/man/keep_services.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{keep_services.update}
-\alias{keep_services.update}
-\title{keep_services.update}
-\usage{
-arv$keep_services.update(keepservice,
- uuid)
-}
-\arguments{
-\item{keepService}{KeepService object.}
-
-\item{uuid}{The UUID of the KeepService in question.}
-}
-\value{
-KeepService object.
-}
-\description{
-keep_services.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.create.Rd b/sdk/R/man/links.create.Rd
deleted file mode 100644
index 06b012e3af..0000000000
--- a/sdk/R/man/links.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.create}
-\alias{links.create}
-\title{links.create}
-\usage{
-arv$links.create(link, ensure_unique_name = "false")
-}
-\arguments{
-\item{link}{Link object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Link object.
-}
-\description{
-links.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.delete.Rd b/sdk/R/man/links.delete.Rd
deleted file mode 100644
index 3a78b7f224..0000000000
--- a/sdk/R/man/links.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.delete}
-\alias{links.delete}
-\title{links.delete}
-\usage{
-arv$links.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Link in question.}
-}
-\value{
-Link object.
-}
-\description{
-links.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.get.Rd b/sdk/R/man/links.get.Rd
deleted file mode 100644
index bf26271266..0000000000
--- a/sdk/R/man/links.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.get}
-\alias{links.get}
-\title{links.get}
-\usage{
-arv$links.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Link in question.}
-}
-\value{
-Link object.
-}
-\description{
-links.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.get_permissions.Rd b/sdk/R/man/links.get_permissions.Rd
deleted file mode 100644
index 982dbb9691..0000000000
--- a/sdk/R/man/links.get_permissions.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.get_permissions}
-\alias{links.get_permissions}
-\title{links.get_permissions}
-\usage{
-arv$links.get_permissions(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-Link object.
-}
-\description{
-links.get_permissions is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.list.Rd b/sdk/R/man/links.list.Rd
deleted file mode 100644
index 540fdc1683..0000000000
--- a/sdk/R/man/links.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.list}
-\alias{links.list}
-\title{links.list}
-\usage{
-arv$links.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-LinkList object.
-}
-\description{
-links.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/links.update.Rd b/sdk/R/man/links.update.Rd
deleted file mode 100644
index 398b6fd037..0000000000
--- a/sdk/R/man/links.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{links.update}
-\alias{links.update}
-\title{links.update}
-\usage{
-arv$links.update(link, uuid)
-}
-\arguments{
-\item{link}{Link object.}
-
-\item{uuid}{The UUID of the Link in question.}
-}
-\value{
-Link object.
-}
-\description{
-links.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/listAll.Rd b/sdk/R/man/listAll.Rd
index 2084b476fd..b9a5c5d174 100644
--- a/sdk/R/man/listAll.Rd
+++ b/sdk/R/man/listAll.Rd
@@ -1,22 +1,22 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/util.R
-\name{listAll}
-\alias{listAll}
-\title{listAll}
-\usage{
-listAll(fn, ...)
-}
-\arguments{
-\item{fn}{Arvados method used to retrieve items from REST service.}
-
-\item{...}{Optional arguments which will be pased to fn .}
-}
-\description{
-List all resources even if the number of items is greater than maximum API limit.
-}
-\examples{
-\dontrun{
-arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
-cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test\%"))
-}
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/util.R
+\name{listAll}
+\alias{listAll}
+\title{listAll}
+\usage{
+listAll(fn, ...)
+}
+\arguments{
+\item{fn}{Arvados method used to retrieve items from REST service.}
+
+\item{...}{Optional arguments which will be passed to fn.}
+}
+\description{
+List all resources even if the number of items is greater than the maximum API limit.
+}
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test\%")))
+}
+}
diff --git a/sdk/R/man/logs.create.Rd b/sdk/R/man/logs.create.Rd
deleted file mode 100644
index a575e5fb8a..0000000000
--- a/sdk/R/man/logs.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{logs.create}
-\alias{logs.create}
-\title{logs.create}
-\usage{
-arv$logs.create(log, ensure_unique_name = "false")
-}
-\arguments{
-\item{log}{Log object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Log object.
-}
-\description{
-logs.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/logs.delete.Rd b/sdk/R/man/logs.delete.Rd
deleted file mode 100644
index 63d6a0bb6a..0000000000
--- a/sdk/R/man/logs.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{logs.delete}
-\alias{logs.delete}
-\title{logs.delete}
-\usage{
-arv$logs.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Log in question.}
-}
-\value{
-Log object.
-}
-\description{
-logs.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/logs.get.Rd b/sdk/R/man/logs.get.Rd
deleted file mode 100644
index d3053d1af7..0000000000
--- a/sdk/R/man/logs.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{logs.get}
-\alias{logs.get}
-\title{logs.get}
-\usage{
-arv$logs.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Log in question.}
-}
-\value{
-Log object.
-}
-\description{
-logs.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/logs.list.Rd b/sdk/R/man/logs.list.Rd
deleted file mode 100644
index 58dbdb71ea..0000000000
--- a/sdk/R/man/logs.list.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{logs.list}
-\alias{logs.list}
-\title{logs.list}
-\usage{
-arv$logs.list(filters = NULL, where = NULL,
- order = NULL, select = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-LogList object.
-}
-\description{
-logs.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/logs.update.Rd b/sdk/R/man/logs.update.Rd
deleted file mode 100644
index efd670c278..0000000000
--- a/sdk/R/man/logs.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{logs.update}
-\alias{logs.update}
-\title{logs.update}
-\usage{
-arv$logs.update(log, uuid)
-}
-\arguments{
-\item{log}{Log object.}
-
-\item{uuid}{The UUID of the Log in question.}
-}
-\value{
-Log object.
-}
-\description{
-logs.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.create.Rd b/sdk/R/man/nodes.create.Rd
deleted file mode 100644
index eb73e6915b..0000000000
--- a/sdk/R/man/nodes.create.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.create}
-\alias{nodes.create}
-\title{nodes.create}
-\usage{
-arv$nodes.create(node, ensure_unique_name = "false",
- assign_slot = NULL)
-}
-\arguments{
-\item{node}{Node object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-
-\item{assign_slot}{assign slot and hostname}
-}
-\value{
-Node object.
-}
-\description{
-nodes.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.delete.Rd b/sdk/R/man/nodes.delete.Rd
deleted file mode 100644
index 0591dedcc2..0000000000
--- a/sdk/R/man/nodes.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.delete}
-\alias{nodes.delete}
-\title{nodes.delete}
-\usage{
-arv$nodes.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Node in question.}
-}
-\value{
-Node object.
-}
-\description{
-nodes.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.get.Rd b/sdk/R/man/nodes.get.Rd
deleted file mode 100644
index dcd7b12091..0000000000
--- a/sdk/R/man/nodes.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.get}
-\alias{nodes.get}
-\title{nodes.get}
-\usage{
-arv$nodes.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Node in question.}
-}
-\value{
-Node object.
-}
-\description{
-nodes.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.list.Rd b/sdk/R/man/nodes.list.Rd
deleted file mode 100644
index 7ccfad6d93..0000000000
--- a/sdk/R/man/nodes.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.list}
-\alias{nodes.list}
-\title{nodes.list}
-\usage{
-arv$nodes.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-NodeList object.
-}
-\description{
-nodes.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.ping.Rd b/sdk/R/man/nodes.ping.Rd
deleted file mode 100644
index e77d2b5b89..0000000000
--- a/sdk/R/man/nodes.ping.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.ping}
-\alias{nodes.ping}
-\title{nodes.ping}
-\usage{
-arv$nodes.ping(uuid, ping_secret)
-}
-\arguments{
-\item{uuid}{}
-
-\item{ping_secret}{}
-}
-\value{
-Node object.
-}
-\description{
-nodes.ping is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/nodes.update.Rd b/sdk/R/man/nodes.update.Rd
deleted file mode 100644
index f87245f9d4..0000000000
--- a/sdk/R/man/nodes.update.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{nodes.update}
-\alias{nodes.update}
-\title{nodes.update}
-\usage{
-arv$nodes.update(node, uuid, assign_slot = NULL)
-}
-\arguments{
-\item{node}{Node object.}
-
-\item{uuid}{The UUID of the Node in question.}
-
-\item{assign_slot}{assign slot and hostname}
-}
-\value{
-Node object.
-}
-\description{
-nodes.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.cancel.Rd b/sdk/R/man/pipeline_instances.cancel.Rd
deleted file mode 100644
index 026de816fb..0000000000
--- a/sdk/R/man/pipeline_instances.cancel.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.cancel}
-\alias{pipeline_instances.cancel}
-\title{pipeline_instances.cancel}
-\usage{
-arv$pipeline_instances.cancel(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-PipelineInstance object.
-}
-\description{
-pipeline_instances.cancel is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.create.Rd b/sdk/R/man/pipeline_instances.create.Rd
deleted file mode 100644
index 9ee5586c89..0000000000
--- a/sdk/R/man/pipeline_instances.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.create}
-\alias{pipeline_instances.create}
-\title{pipeline_instances.create}
-\usage{
-arv$pipeline_instances.create(pipelineinstance,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{pipelineInstance}{PipelineInstance object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-PipelineInstance object.
-}
-\description{
-pipeline_instances.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.delete.Rd b/sdk/R/man/pipeline_instances.delete.Rd
deleted file mode 100644
index 7297da5bdf..0000000000
--- a/sdk/R/man/pipeline_instances.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.delete}
-\alias{pipeline_instances.delete}
-\title{pipeline_instances.delete}
-\usage{
-arv$pipeline_instances.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the PipelineInstance in question.}
-}
-\value{
-PipelineInstance object.
-}
-\description{
-pipeline_instances.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.get.Rd b/sdk/R/man/pipeline_instances.get.Rd
deleted file mode 100644
index e500df5892..0000000000
--- a/sdk/R/man/pipeline_instances.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.get}
-\alias{pipeline_instances.get}
-\title{pipeline_instances.get}
-\usage{
-arv$pipeline_instances.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the PipelineInstance in question.}
-}
-\value{
-PipelineInstance object.
-}
-\description{
-pipeline_instances.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.list.Rd b/sdk/R/man/pipeline_instances.list.Rd
deleted file mode 100644
index 407f94446d..0000000000
--- a/sdk/R/man/pipeline_instances.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.list}
-\alias{pipeline_instances.list}
-\title{pipeline_instances.list}
-\usage{
-arv$pipeline_instances.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-PipelineInstanceList object.
-}
-\description{
-pipeline_instances.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_instances.update.Rd b/sdk/R/man/pipeline_instances.update.Rd
deleted file mode 100644
index 4a6666004a..0000000000
--- a/sdk/R/man/pipeline_instances.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_instances.update}
-\alias{pipeline_instances.update}
-\title{pipeline_instances.update}
-\usage{
-arv$pipeline_instances.update(pipelineinstance,
- uuid)
-}
-\arguments{
-\item{pipelineInstance}{PipelineInstance object.}
-
-\item{uuid}{The UUID of the PipelineInstance in question.}
-}
-\value{
-PipelineInstance object.
-}
-\description{
-pipeline_instances.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_templates.create.Rd b/sdk/R/man/pipeline_templates.create.Rd
deleted file mode 100644
index afb1e5870b..0000000000
--- a/sdk/R/man/pipeline_templates.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_templates.create}
-\alias{pipeline_templates.create}
-\title{pipeline_templates.create}
-\usage{
-arv$pipeline_templates.create(pipelinetemplate,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{pipelineTemplate}{PipelineTemplate object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-PipelineTemplate object.
-}
-\description{
-pipeline_templates.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_templates.delete.Rd b/sdk/R/man/pipeline_templates.delete.Rd
deleted file mode 100644
index c74d88b058..0000000000
--- a/sdk/R/man/pipeline_templates.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_templates.delete}
-\alias{pipeline_templates.delete}
-\title{pipeline_templates.delete}
-\usage{
-arv$pipeline_templates.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the PipelineTemplate in question.}
-}
-\value{
-PipelineTemplate object.
-}
-\description{
-pipeline_templates.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_templates.get.Rd b/sdk/R/man/pipeline_templates.get.Rd
deleted file mode 100644
index 48ef739505..0000000000
--- a/sdk/R/man/pipeline_templates.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_templates.get}
-\alias{pipeline_templates.get}
-\title{pipeline_templates.get}
-\usage{
-arv$pipeline_templates.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the PipelineTemplate in question.}
-}
-\value{
-PipelineTemplate object.
-}
-\description{
-pipeline_templates.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_templates.list.Rd b/sdk/R/man/pipeline_templates.list.Rd
deleted file mode 100644
index c6c7413d5e..0000000000
--- a/sdk/R/man/pipeline_templates.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_templates.list}
-\alias{pipeline_templates.list}
-\title{pipeline_templates.list}
-\usage{
-arv$pipeline_templates.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-PipelineTemplateList object.
-}
-\description{
-pipeline_templates.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/pipeline_templates.update.Rd b/sdk/R/man/pipeline_templates.update.Rd
deleted file mode 100644
index 25e02bfadb..0000000000
--- a/sdk/R/man/pipeline_templates.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{pipeline_templates.update}
-\alias{pipeline_templates.update}
-\title{pipeline_templates.update}
-\usage{
-arv$pipeline_templates.update(pipelinetemplate,
- uuid)
-}
-\arguments{
-\item{pipelineTemplate}{PipelineTemplate object.}
-
-\item{uuid}{The UUID of the PipelineTemplate in question.}
-}
-\value{
-PipelineTemplate object.
-}
-\description{
-pipeline_templates.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/print.ArvadosFile.Rd b/sdk/R/man/print.ArvadosFile.Rd
index 566ec8b20e..43f734121e 100644
--- a/sdk/R/man/print.ArvadosFile.Rd
+++ b/sdk/R/man/print.ArvadosFile.Rd
@@ -1,16 +1,16 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/ArvadosFile.R
-\name{print.ArvadosFile}
-\alias{print.ArvadosFile}
-\title{print.ArvadosFile}
-\usage{
-\method{print}{ArvadosFile}(x, ...)
-}
-\arguments{
-\item{x}{Instance of ArvadosFile class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for ArvadosFile class
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{print.ArvadosFile}
+\alias{print.ArvadosFile}
+\title{print.ArvadosFile}
+\usage{
+\method{print}{ArvadosFile}(x, ...)
+}
+\arguments{
+\item{x}{Instance of ArvadosFile class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for ArvadosFile class
+}
diff --git a/sdk/R/man/print.Collection.Rd b/sdk/R/man/print.Collection.Rd
index 885238370e..3de4bd541a 100644
--- a/sdk/R/man/print.Collection.Rd
+++ b/sdk/R/man/print.Collection.Rd
@@ -1,16 +1,16 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Collection.R
-\name{print.Collection}
-\alias{print.Collection}
-\title{print.Collection}
-\usage{
-\method{print}{Collection}(x, ...)
-}
-\arguments{
-\item{x}{Instance of Collection class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for Collection class
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{print.Collection}
+\alias{print.Collection}
+\title{print.Collection}
+\usage{
+\method{print}{Collection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Collection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Collection class
+}
diff --git a/sdk/R/man/print.Subcollection.Rd b/sdk/R/man/print.Subcollection.Rd
index 621350f603..3bc62c0908 100644
--- a/sdk/R/man/print.Subcollection.Rd
+++ b/sdk/R/man/print.Subcollection.Rd
@@ -1,16 +1,16 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Subcollection.R
-\name{print.Subcollection}
-\alias{print.Subcollection}
-\title{print.Subcollection}
-\usage{
-\method{print}{Subcollection}(x, ...)
-}
-\arguments{
-\item{x}{Instance of Subcollection class}
-
-\item{...}{Optional arguments.}
-}
-\description{
-Custom print function for Subcollection class
-}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{print.Subcollection}
+\alias{print.Subcollection}
+\title{print.Subcollection}
+\usage{
+\method{print}{Subcollection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Subcollection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Subcollection class
+}
diff --git a/sdk/R/man/projects.create.Rd b/sdk/R/man/projects.create.Rd
deleted file mode 100644
index 66b1f2ab67..0000000000
--- a/sdk/R/man/projects.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{projects.create}
-\alias{projects.create}
-\title{project.create}
-\usage{
-arv$projects.create(group, ensure_unique_name = "false")
-}
-\arguments{
-\item{group}{Group object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Group object.
-}
-\description{
-projects.create wrapps groups.create method by setting group_class attribute to "project".
-}
diff --git a/sdk/R/man/projects.delete.Rd b/sdk/R/man/projects.delete.Rd
deleted file mode 100644
index 7170792ed6..0000000000
--- a/sdk/R/man/projects.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{projects.delete}
-\alias{projects.delete}
-\title{project.delete}
-\usage{
-arv$project.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-projects.delete is equivalent to groups.delete method.
-}
diff --git a/sdk/R/man/projects.get.Rd b/sdk/R/man/projects.get.Rd
deleted file mode 100644
index 1939378786..0000000000
--- a/sdk/R/man/projects.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{projects.get}
-\alias{projects.get}
-\title{project.get}
-\usage{
-arv$projects.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-projects.get is equivalent to groups.get method.
-}
diff --git a/sdk/R/man/projects.list.Rd b/sdk/R/man/projects.list.Rd
deleted file mode 100644
index ff4c1c9edb..0000000000
--- a/sdk/R/man/projects.list.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{projects.list}
-\alias{projects.list}
-\title{project.list}
-\usage{
-arv$projects.list(filters = NULL,
- where = NULL, order = NULL, distinct = NULL,
- limit = "100", offset = "0", count = "exact",
- include_trash = NULL, uuid = NULL, recursive = NULL)
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-
-\item{include_trash}{Include items whose is_trashed attribute is true.}
-
-\item{uuid}{}
-
-\item{recursive}{Include contents from child groups recursively.}
-}
-\value{
-Group object.
-}
-\description{
-projects.list wrapps groups.list method by setting group_class attribute to "project".
-}
diff --git a/sdk/R/man/projects.update.Rd b/sdk/R/man/projects.update.Rd
deleted file mode 100644
index 824c5b53f5..0000000000
--- a/sdk/R/man/projects.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{projects.update}
-\alias{projects.update}
-\title{project.update}
-\usage{
-arv$projects.update(group, uuid)
-}
-\arguments{
-\item{group}{Group object.}
-
-\item{uuid}{The UUID of the Group in question.}
-}
-\value{
-Group object.
-}
-\description{
-projects.update wrapps groups.update method by setting group_class attribute to "project".
-}
diff --git a/sdk/R/man/repositories.create.Rd b/sdk/R/man/repositories.create.Rd
deleted file mode 100644
index 1603604f3d..0000000000
--- a/sdk/R/man/repositories.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.create}
-\alias{repositories.create}
-\title{repositories.create}
-\usage{
-arv$repositories.create(repository,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{repository}{Repository object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Repository object.
-}
-\description{
-repositories.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/repositories.delete.Rd b/sdk/R/man/repositories.delete.Rd
deleted file mode 100644
index 36fac73ac1..0000000000
--- a/sdk/R/man/repositories.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.delete}
-\alias{repositories.delete}
-\title{repositories.delete}
-\usage{
-arv$repositories.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Repository in question.}
-}
-\value{
-Repository object.
-}
-\description{
-repositories.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/repositories.get.Rd b/sdk/R/man/repositories.get.Rd
deleted file mode 100644
index b855b76d87..0000000000
--- a/sdk/R/man/repositories.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.get}
-\alias{repositories.get}
-\title{repositories.get}
-\usage{
-arv$repositories.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Repository in question.}
-}
-\value{
-Repository object.
-}
-\description{
-repositories.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/repositories.get_all_permissions.Rd b/sdk/R/man/repositories.get_all_permissions.Rd
deleted file mode 100644
index f16dbd1d92..0000000000
--- a/sdk/R/man/repositories.get_all_permissions.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.get_all_permissions}
-\alias{repositories.get_all_permissions}
-\title{repositories.get_all_permissions}
-\usage{
-arv$repositories.get_all_permissions(NULL)
-}
-\value{
-Repository object.
-}
-\description{
-repositories.get_all_permissions is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/repositories.list.Rd b/sdk/R/man/repositories.list.Rd
deleted file mode 100644
index d1f4772e74..0000000000
--- a/sdk/R/man/repositories.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.list}
-\alias{repositories.list}
-\title{repositories.list}
-\usage{
-arv$repositories.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-RepositoryList object.
-}
-\description{
-repositories.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/repositories.update.Rd b/sdk/R/man/repositories.update.Rd
deleted file mode 100644
index 1be4b61488..0000000000
--- a/sdk/R/man/repositories.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{repositories.update}
-\alias{repositories.update}
-\title{repositories.update}
-\usage{
-arv$repositories.update(repository,
- uuid)
-}
-\arguments{
-\item{repository}{Repository object.}
-
-\item{uuid}{The UUID of the Repository in question.}
-}
-\value{
-Repository object.
-}
-\description{
-repositories.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/specimens.create.Rd b/sdk/R/man/specimens.create.Rd
deleted file mode 100644
index 12344f29ff..0000000000
--- a/sdk/R/man/specimens.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{specimens.create}
-\alias{specimens.create}
-\title{specimens.create}
-\usage{
-arv$specimens.create(specimen,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{specimen}{Specimen object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Specimen object.
-}
-\description{
-specimens.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/specimens.delete.Rd b/sdk/R/man/specimens.delete.Rd
deleted file mode 100644
index 8ed2d397ab..0000000000
--- a/sdk/R/man/specimens.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{specimens.delete}
-\alias{specimens.delete}
-\title{specimens.delete}
-\usage{
-arv$specimens.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Specimen in question.}
-}
-\value{
-Specimen object.
-}
-\description{
-specimens.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/specimens.get.Rd b/sdk/R/man/specimens.get.Rd
deleted file mode 100644
index e757056a20..0000000000
--- a/sdk/R/man/specimens.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{specimens.get}
-\alias{specimens.get}
-\title{specimens.get}
-\usage{
-arv$specimens.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Specimen in question.}
-}
-\value{
-Specimen object.
-}
-\description{
-specimens.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/specimens.list.Rd b/sdk/R/man/specimens.list.Rd
deleted file mode 100644
index 4e07f4ab28..0000000000
--- a/sdk/R/man/specimens.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{specimens.list}
-\alias{specimens.list}
-\title{specimens.list}
-\usage{
-arv$specimens.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-SpecimenList object.
-}
-\description{
-specimens.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/specimens.update.Rd b/sdk/R/man/specimens.update.Rd
deleted file mode 100644
index 73a9010878..0000000000
--- a/sdk/R/man/specimens.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{specimens.update}
-\alias{specimens.update}
-\title{specimens.update}
-\usage{
-arv$specimens.update(specimen,
- uuid)
-}
-\arguments{
-\item{specimen}{Specimen object.}
-
-\item{uuid}{The UUID of the Specimen in question.}
-}
-\value{
-Specimen object.
-}
-\description{
-specimens.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/traits.create.Rd b/sdk/R/man/traits.create.Rd
deleted file mode 100644
index bf6e0c1e1e..0000000000
--- a/sdk/R/man/traits.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{traits.create}
-\alias{traits.create}
-\title{traits.create}
-\usage{
-arv$traits.create(trait, ensure_unique_name = "false")
-}
-\arguments{
-\item{trait}{Trait object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Trait object.
-}
-\description{
-traits.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/traits.delete.Rd b/sdk/R/man/traits.delete.Rd
deleted file mode 100644
index 9ab957002c..0000000000
--- a/sdk/R/man/traits.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{traits.delete}
-\alias{traits.delete}
-\title{traits.delete}
-\usage{
-arv$traits.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Trait in question.}
-}
-\value{
-Trait object.
-}
-\description{
-traits.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/traits.get.Rd b/sdk/R/man/traits.get.Rd
deleted file mode 100644
index 7d2bac56b4..0000000000
--- a/sdk/R/man/traits.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{traits.get}
-\alias{traits.get}
-\title{traits.get}
-\usage{
-arv$traits.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Trait in question.}
-}
-\value{
-Trait object.
-}
-\description{
-traits.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/traits.list.Rd b/sdk/R/man/traits.list.Rd
deleted file mode 100644
index e91b929949..0000000000
--- a/sdk/R/man/traits.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{traits.list}
-\alias{traits.list}
-\title{traits.list}
-\usage{
-arv$traits.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-TraitList object.
-}
-\description{
-traits.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/traits.update.Rd b/sdk/R/man/traits.update.Rd
deleted file mode 100644
index f594434224..0000000000
--- a/sdk/R/man/traits.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{traits.update}
-\alias{traits.update}
-\title{traits.update}
-\usage{
-arv$traits.update(trait, uuid)
-}
-\arguments{
-\item{trait}{Trait object.}
-
-\item{uuid}{The UUID of the Trait in question.}
-}
-\value{
-Trait object.
-}
-\description{
-traits.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.create.Rd b/sdk/R/man/user_agreements.create.Rd
deleted file mode 100644
index 79913057cf..0000000000
--- a/sdk/R/man/user_agreements.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.create}
-\alias{user_agreements.create}
-\title{user_agreements.create}
-\usage{
-arv$user_agreements.create(useragreement,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{userAgreement}{UserAgreement object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.delete.Rd b/sdk/R/man/user_agreements.delete.Rd
deleted file mode 100644
index 30c9bf80b8..0000000000
--- a/sdk/R/man/user_agreements.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.delete}
-\alias{user_agreements.delete}
-\title{user_agreements.delete}
-\usage{
-arv$user_agreements.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the UserAgreement in question.}
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.get.Rd b/sdk/R/man/user_agreements.get.Rd
deleted file mode 100644
index 63116059e4..0000000000
--- a/sdk/R/man/user_agreements.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.get}
-\alias{user_agreements.get}
-\title{user_agreements.get}
-\usage{
-arv$user_agreements.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the UserAgreement in question.}
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.list.Rd b/sdk/R/man/user_agreements.list.Rd
deleted file mode 100644
index 5e6986189d..0000000000
--- a/sdk/R/man/user_agreements.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.list}
-\alias{user_agreements.list}
-\title{user_agreements.list}
-\usage{
-arv$user_agreements.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-UserAgreementList object.
-}
-\description{
-user_agreements.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.new.Rd b/sdk/R/man/user_agreements.new.Rd
deleted file mode 100644
index c213cb4258..0000000000
--- a/sdk/R/man/user_agreements.new.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.new}
-\alias{user_agreements.new}
-\title{user_agreements.new}
-\usage{
-arv$user_agreements.new(NULL)
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.new is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.sign.Rd b/sdk/R/man/user_agreements.sign.Rd
deleted file mode 100644
index 98116106bf..0000000000
--- a/sdk/R/man/user_agreements.sign.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.sign}
-\alias{user_agreements.sign}
-\title{user_agreements.sign}
-\usage{
-arv$user_agreements.sign(NULL)
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.sign is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.signatures.Rd b/sdk/R/man/user_agreements.signatures.Rd
deleted file mode 100644
index d889579bb8..0000000000
--- a/sdk/R/man/user_agreements.signatures.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.signatures}
-\alias{user_agreements.signatures}
-\title{user_agreements.signatures}
-\usage{
-arv$user_agreements.signatures(NULL)
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.signatures is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/user_agreements.update.Rd b/sdk/R/man/user_agreements.update.Rd
deleted file mode 100644
index 578e17954e..0000000000
--- a/sdk/R/man/user_agreements.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{user_agreements.update}
-\alias{user_agreements.update}
-\title{user_agreements.update}
-\usage{
-arv$user_agreements.update(useragreement,
- uuid)
-}
-\arguments{
-\item{userAgreement}{UserAgreement object.}
-
-\item{uuid}{The UUID of the UserAgreement in question.}
-}
-\value{
-UserAgreement object.
-}
-\description{
-user_agreements.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.activate.Rd b/sdk/R/man/users.activate.Rd
deleted file mode 100644
index 201caf4c89..0000000000
--- a/sdk/R/man/users.activate.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.activate}
-\alias{users.activate}
-\title{users.activate}
-\usage{
-arv$users.activate(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-User object.
-}
-\description{
-users.activate is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.create.Rd b/sdk/R/man/users.create.Rd
deleted file mode 100644
index 1805c66d25..0000000000
--- a/sdk/R/man/users.create.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.create}
-\alias{users.create}
-\title{users.create}
-\usage{
-arv$users.create(user, ensure_unique_name = "false")
-}
-\arguments{
-\item{user}{User object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-User object.
-}
-\description{
-users.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.current.Rd b/sdk/R/man/users.current.Rd
deleted file mode 100644
index 4e8af94fb6..0000000000
--- a/sdk/R/man/users.current.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.current}
-\alias{users.current}
-\title{users.current}
-\usage{
-arv$users.current(NULL)
-}
-\value{
-User object.
-}
-\description{
-users.current is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.delete.Rd b/sdk/R/man/users.delete.Rd
deleted file mode 100644
index df9e23880d..0000000000
--- a/sdk/R/man/users.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.delete}
-\alias{users.delete}
-\title{users.delete}
-\usage{
-arv$users.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the User in question.}
-}
-\value{
-User object.
-}
-\description{
-users.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.get.Rd b/sdk/R/man/users.get.Rd
deleted file mode 100644
index ec2b284ee1..0000000000
--- a/sdk/R/man/users.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.get}
-\alias{users.get}
-\title{users.get}
-\usage{
-arv$users.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the User in question.}
-}
-\value{
-User object.
-}
-\description{
-users.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.list.Rd b/sdk/R/man/users.list.Rd
deleted file mode 100644
index 71319433b8..0000000000
--- a/sdk/R/man/users.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.list}
-\alias{users.list}
-\title{users.list}
-\usage{
-arv$users.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-UserList object.
-}
-\description{
-users.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.merge.Rd b/sdk/R/man/users.merge.Rd
deleted file mode 100644
index a539591a6a..0000000000
--- a/sdk/R/man/users.merge.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.merge}
-\alias{users.merge}
-\title{users.merge}
-\usage{
-arv$users.merge(new_owner_uuid,
- new_user_token, redirect_to_new_user = NULL)
-}
-\arguments{
-\item{new_owner_uuid}{}
-
-\item{new_user_token}{}
-
-\item{redirect_to_new_user}{}
-}
-\value{
-User object.
-}
-\description{
-users.merge is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.setup.Rd b/sdk/R/man/users.setup.Rd
deleted file mode 100644
index 869403d3ed..0000000000
--- a/sdk/R/man/users.setup.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.setup}
-\alias{users.setup}
-\title{users.setup}
-\usage{
-arv$users.setup(user = NULL, openid_prefix = NULL,
- repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
-}
-\arguments{
-\item{user}{}
-
-\item{openid_prefix}{}
-
-\item{repo_name}{}
-
-\item{vm_uuid}{}
-
-\item{send_notification_email}{}
-}
-\value{
-User object.
-}
-\description{
-users.setup is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.system.Rd b/sdk/R/man/users.system.Rd
deleted file mode 100644
index c321c23cdc..0000000000
--- a/sdk/R/man/users.system.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.system}
-\alias{users.system}
-\title{users.system}
-\usage{
-arv$users.system(NULL)
-}
-\value{
-User object.
-}
-\description{
-users.system is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.unsetup.Rd b/sdk/R/man/users.unsetup.Rd
deleted file mode 100644
index 85de6f9782..0000000000
--- a/sdk/R/man/users.unsetup.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.unsetup}
-\alias{users.unsetup}
-\title{users.unsetup}
-\usage{
-arv$users.unsetup(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-User object.
-}
-\description{
-users.unsetup is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/users.update.Rd b/sdk/R/man/users.update.Rd
deleted file mode 100644
index fcd9c71c78..0000000000
--- a/sdk/R/man/users.update.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{users.update}
-\alias{users.update}
-\title{users.update}
-\usage{
-arv$users.update(user, uuid)
-}
-\arguments{
-\item{user}{User object.}
-
-\item{uuid}{The UUID of the User in question.}
-}
-\value{
-User object.
-}
-\description{
-users.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.create.Rd b/sdk/R/man/virtual_machines.create.Rd
deleted file mode 100644
index 689a0f9899..0000000000
--- a/sdk/R/man/virtual_machines.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.create}
-\alias{virtual_machines.create}
-\title{virtual_machines.create}
-\usage{
-arv$virtual_machines.create(virtualmachine,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{virtualMachine}{VirtualMachine object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.delete.Rd b/sdk/R/man/virtual_machines.delete.Rd
deleted file mode 100644
index c513833db6..0000000000
--- a/sdk/R/man/virtual_machines.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.delete}
-\alias{virtual_machines.delete}
-\title{virtual_machines.delete}
-\usage{
-arv$virtual_machines.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the VirtualMachine in question.}
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.get.Rd b/sdk/R/man/virtual_machines.get.Rd
deleted file mode 100644
index 3e56e17ea4..0000000000
--- a/sdk/R/man/virtual_machines.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.get}
-\alias{virtual_machines.get}
-\title{virtual_machines.get}
-\usage{
-arv$virtual_machines.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the VirtualMachine in question.}
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.get_all_logins.Rd b/sdk/R/man/virtual_machines.get_all_logins.Rd
deleted file mode 100644
index b2af1e492d..0000000000
--- a/sdk/R/man/virtual_machines.get_all_logins.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.get_all_logins}
-\alias{virtual_machines.get_all_logins}
-\title{virtual_machines.get_all_logins}
-\usage{
-arv$virtual_machines.get_all_logins(NULL)
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.get_all_logins is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.list.Rd b/sdk/R/man/virtual_machines.list.Rd
deleted file mode 100644
index 42ed58b0e1..0000000000
--- a/sdk/R/man/virtual_machines.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.list}
-\alias{virtual_machines.list}
-\title{virtual_machines.list}
-\usage{
-arv$virtual_machines.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-VirtualMachineList object.
-}
-\description{
-virtual_machines.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.logins.Rd b/sdk/R/man/virtual_machines.logins.Rd
deleted file mode 100644
index 7e25110aa4..0000000000
--- a/sdk/R/man/virtual_machines.logins.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.logins}
-\alias{virtual_machines.logins}
-\title{virtual_machines.logins}
-\usage{
-arv$virtual_machines.logins(uuid)
-}
-\arguments{
-\item{uuid}{}
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.logins is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/virtual_machines.update.Rd b/sdk/R/man/virtual_machines.update.Rd
deleted file mode 100644
index d1a07eb338..0000000000
--- a/sdk/R/man/virtual_machines.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{virtual_machines.update}
-\alias{virtual_machines.update}
-\title{virtual_machines.update}
-\usage{
-arv$virtual_machines.update(virtualmachine,
- uuid)
-}
-\arguments{
-\item{virtualMachine}{VirtualMachine object.}
-
-\item{uuid}{The UUID of the VirtualMachine in question.}
-}
-\value{
-VirtualMachine object.
-}
-\description{
-virtual_machines.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/workflows.create.Rd b/sdk/R/man/workflows.create.Rd
deleted file mode 100644
index 8a84e00465..0000000000
--- a/sdk/R/man/workflows.create.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{workflows.create}
-\alias{workflows.create}
-\title{workflows.create}
-\usage{
-arv$workflows.create(workflow,
- ensure_unique_name = "false")
-}
-\arguments{
-\item{workflow}{Workflow object.}
-
-\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
-}
-\value{
-Workflow object.
-}
-\description{
-workflows.create is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/workflows.delete.Rd b/sdk/R/man/workflows.delete.Rd
deleted file mode 100644
index 96a561e367..0000000000
--- a/sdk/R/man/workflows.delete.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{workflows.delete}
-\alias{workflows.delete}
-\title{workflows.delete}
-\usage{
-arv$workflows.delete(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Workflow in question.}
-}
-\value{
-Workflow object.
-}
-\description{
-workflows.delete is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/workflows.get.Rd b/sdk/R/man/workflows.get.Rd
deleted file mode 100644
index 8a8c3a8bd4..0000000000
--- a/sdk/R/man/workflows.get.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{workflows.get}
-\alias{workflows.get}
-\title{workflows.get}
-\usage{
-arv$workflows.get(uuid)
-}
-\arguments{
-\item{uuid}{The UUID of the Workflow in question.}
-}
-\value{
-Workflow object.
-}
-\description{
-workflows.get is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/workflows.list.Rd b/sdk/R/man/workflows.list.Rd
deleted file mode 100644
index e24b74d030..0000000000
--- a/sdk/R/man/workflows.list.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{workflows.list}
-\alias{workflows.list}
-\title{workflows.list}
-\usage{
-arv$workflows.list(filters = NULL,
- where = NULL, order = NULL, select = NULL,
- distinct = NULL, limit = "100", offset = "0",
- count = "exact")
-}
-\arguments{
-\item{filters}{}
-
-\item{where}{}
-
-\item{order}{}
-
-\item{select}{}
-
-\item{distinct}{}
-
-\item{limit}{}
-
-\item{offset}{}
-
-\item{count}{}
-}
-\value{
-WorkflowList object.
-}
-\description{
-workflows.list is a method defined in Arvados class.
-}
diff --git a/sdk/R/man/workflows.update.Rd b/sdk/R/man/workflows.update.Rd
deleted file mode 100644
index d3f6186a01..0000000000
--- a/sdk/R/man/workflows.update.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Arvados.R
-\name{workflows.update}
-\alias{workflows.update}
-\title{workflows.update}
-\usage{
-arv$workflows.update(workflow,
- uuid)
-}
-\arguments{
-\item{workflow}{Workflow object.}
-
-\item{uuid}{The UUID of the Workflow in question.}
-}
-\value{
-Workflow object.
-}
-\description{
-workflows.update is a method defined in Arvados class.
-}
diff --git a/sdk/R/run_test.R b/sdk/R/run_test.R
index 156dde1080..1384c1f8c1 100644
--- a/sdk/R/run_test.R
+++ b/sdk/R/run_test.R
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: Apache-2.0
+devtools::check()
+
results <- devtools::test()
any_error <- any(as.data.frame(results)$error)
if (any_error) {
diff --git a/sdk/R/tests/testthat/fakes/FakeRESTService.R b/sdk/R/tests/testthat/fakes/FakeRESTService.R
index a91da04fd1..255e64d1b4 100644
--- a/sdk/R/tests/testthat/fakes/FakeRESTService.R
+++ b/sdk/R/tests/testthat/fakes/FakeRESTService.R
@@ -143,10 +143,14 @@ FakeRESTService <- R6::R6Class(
self$returnContent
},
- getCollectionContent = function(uuid)
+ getCollectionContent = function(uuid, relativePath = NULL)
{
self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1
- self$collectionContent
+ if (!is.null(relativePath)) {
+ self$collectionContent[startsWith(self$collectionContent, relativePath)]
+ } else {
+ self$collectionContent
+ }
},
getResourceSize = function(uuid, relativePathToResource)
diff --git a/sdk/R/tests/testthat/test-Collection.R b/sdk/R/tests/testthat/test-Collection.R
index 20a2ecf05b..3023a1b23f 100644
--- a/sdk/R/tests/testthat/test-Collection.R
+++ b/sdk/R/tests/testthat/test-Collection.R
@@ -239,6 +239,12 @@ test_that("get returns arvados file or subcollection from internal tree structur
expect_true(fishIsNotNull)
expect_that(fish$getName(), equals("fish"))
+
+ ball <- collection$get("ball")
+ ballIsNotNull <- !is.null(ball)
+
+ expect_true(ballIsNotNull)
+ expect_that(ball$getName(), equals("ball"))
})
test_that(paste("copy copies content to a new location inside file tree",
diff --git a/sdk/cli/Gemfile b/sdk/cli/Gemfile
index 61cf76dbdd..f34204e029 100644
--- a/sdk/cli/Gemfile
+++ b/sdk/cli/Gemfile
@@ -6,4 +6,3 @@ source 'https://rubygems.org'
gemspec
gem 'minitest', '>= 5.0.0'
gem 'rake'
-gem 'signet', '<= 0.11'
diff --git a/sdk/cli/arvados-cli.gemspec b/sdk/cli/arvados-cli.gemspec
index 1ff841acdd..67f93c19c3 100644
--- a/sdk/cli/arvados-cli.gemspec
+++ b/sdk/cli/arvados-cli.gemspec
@@ -38,13 +38,12 @@ Gem::Specification.new do |s|
s.files = ["bin/arv", "bin/arv-tag", "LICENSE-2.0.txt"]
s.executables << "arv"
s.executables << "arv-tag"
- s.required_ruby_version = '>= 2.1.0'
- s.add_runtime_dependency 'arvados', '>= 1.4.1.20190320201707'
- # Our google-api-client dependency used to be < 0.9, but that could be
- # satisfied by the buggy 0.9.pre*, cf. https://dev.arvados.org/issues/9213
- # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
- s.add_runtime_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
- s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 5.3'
+ s.required_ruby_version = '>= 2.7.0'
+ s.add_runtime_dependency 'arvados', '~> 2.8.a'
+ # arvados fork of google-api-client gem with old API and new
+ # compatibility fixes, built from ../ruby-google-api-client/
+ s.add_runtime_dependency('arvados-google-api-client', '>= 0.8.7.5', '< 0.8.9')
+ s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 8.0'
s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
s.add_runtime_dependency 'optimist', '~> 3.0'
s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
diff --git a/sdk/cwl/arvados_cwl/__init__.py b/sdk/cwl/arvados_cwl/__init__.py
index 08a05d571c..7e13488758 100644
--- a/sdk/cwl/arvados_cwl/__init__.py
+++ b/sdk/cwl/arvados_cwl/__init__.py
@@ -10,11 +10,12 @@ from future.utils import viewitems
from builtins import str
import argparse
+import importlib.metadata
+import importlib.resources
import logging
import os
import sys
import re
-import pkg_resources # part of setuptools
from schema_salad.sourceline import SourceLine
import schema_salad.validate as validate
@@ -28,14 +29,15 @@ from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing
import arvados
import arvados.config
+import arvados.logging
from arvados.keep import KeepClient
from arvados.errors import ApiError
import arvados.commands._util as arv_cmd
-from arvados.api import OrderedJsonModel
from .perf import Perf
from ._version import __version__
from .executor import ArvCwlExecutor
+from .fsaccess import workflow_uuid_pattern
# These aren't used directly in this file but
# other code expects to import them from here
@@ -56,18 +58,18 @@ arvados.log_handler.setFormatter(logging.Formatter(
def versionstring():
"""Print version string of key packages for provenance and debugging."""
-
- arvcwlpkg = pkg_resources.require("arvados-cwl-runner")
- arvpkg = pkg_resources.require("arvados-python-client")
- cwlpkg = pkg_resources.require("cwltool")
-
- return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
- "arvados-python-client", arvpkg[0].version,
- "cwltool", cwlpkg[0].version)
-
+ return "{} {}, arvados-python-client {}, cwltool {}".format(
+ sys.argv[0],
+ importlib.metadata.version('arvados-cwl-runner'),
+ importlib.metadata.version('arvados-python-client'),
+ importlib.metadata.version('cwltool'),
+ )
def arg_parser(): # type: () -> argparse.ArgumentParser
- parser = argparse.ArgumentParser(description='Arvados executor for Common Workflow Language')
+ parser = argparse.ArgumentParser(
+ description='Arvados executor for Common Workflow Language',
+ parents=[arv_cmd.retry_opt],
+ )
parser.add_argument("--basedir",
help="Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).")
@@ -119,6 +121,8 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
exgroup.add_argument("--create-workflow", action="store_true", help="Register an Arvados workflow that can be run from Workbench")
exgroup.add_argument("--update-workflow", metavar="UUID", help="Update an existing Arvados workflow with the given UUID.")
+ exgroup.add_argument("--print-keep-deps", action="store_true", help="To assist copying, print a list of Keep collections that this workflow depends on.")
+
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner, wait for completion.",
default=True, dest="wait")
@@ -199,6 +203,10 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
action="store_false", default=True,
help=argparse.SUPPRESS)
+ parser.add_argument("--disable-git", dest="git_info",
+ action="store_false", default=True,
+ help=argparse.SUPPRESS)
+
parser.add_argument("--disable-color", dest="enable_color",
action="store_false", default=True,
help=argparse.SUPPRESS)
@@ -207,12 +215,25 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
action="store_true", default=False,
help=argparse.SUPPRESS)
+ parser.add_argument("--fast-parser", dest="fast_parser",
+ action="store_true", default=False,
+ help=argparse.SUPPRESS)
+
parser.add_argument("--thread-count", type=int,
default=0, help="Number of threads to use for job submit and output collection.")
parser.add_argument("--http-timeout", type=int,
default=5*60, dest="http_timeout", help="API request timeout in seconds. Default is 300 seconds (5 minutes).")
+ parser.add_argument("--defer-downloads", action="store_true", default=False,
+ help="When submitting a workflow, defer downloading HTTP URLs to workflow launch instead of downloading to Keep before submit.")
+
+ parser.add_argument("--varying-url-params", type=str, default="",
+ help="A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.")
+
+ parser.add_argument("--prefer-cached-downloads", action="store_true", default=False,
+ help="If a HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).")
+
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--enable-preemptible", dest="enable_preemptible", default=None, action="store_true", help="Use preemptible instances. Control individual steps with arv:UsePreemptible hint.")
exgroup.add_argument("--disable-preemptible", dest="enable_preemptible", default=None, action="store_false", help="Don't use preemptible instances.")
@@ -237,6 +258,10 @@ def arg_parser(): # type: () -> argparse.ArgumentParser
default=False, dest="trash_intermediate",
help="Do not trash intermediate outputs (default).")
+ exgroup = parser.add_mutually_exclusive_group()
+ exgroup.add_argument("--enable-usage-report", dest="enable_usage_report", default=None, action="store_true", help="Create usage_report.html with a summary of each step's resource usage.")
+ exgroup.add_argument("--disable-usage-report", dest="enable_usage_report", default=None, action="store_false", help="Disable usage report.")
+
parser.add_argument("workflow", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
@@ -247,10 +272,8 @@ def add_arv_hints():
cwltool.command_line_tool.ACCEPTLIST_RE = cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE
supported_versions = ["v1.0", "v1.1", "v1.2"]
for s in supported_versions:
- res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema-%s.yml' % s)
- customschema = res.read().decode('utf-8')
+ customschema = importlib.resources.read_text(__name__, f'arv-cwl-schema-{s}.yml', 'utf-8')
use_custom_schema(s, "http://arvados.org/cwl", customschema)
- res.close()
cwltool.process.supportedProcessRequirements.extend([
"http://arvados.org/cwl#RunInSingleContainer",
"http://arvados.org/cwl#OutputDirType",
@@ -266,6 +289,8 @@ def add_arv_hints():
"http://commonwl.org/cwltool#CUDARequirement",
"http://arvados.org/cwl#UsePreemptible",
"http://arvados.org/cwl#OutputCollectionProperties",
+ "http://arvados.org/cwl#KeepCacheTypeRequirement",
+ "http://arvados.org/cwl#OutOfMemoryRetry",
])
def exit_signal_handler(sigcode, frame):
@@ -301,7 +326,9 @@ def main(args=sys.argv[1:],
return 1
arvargs.work_api = want_api
- if (arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
+ workflow_op = arvargs.create_workflow or arvargs.update_workflow or arvargs.print_keep_deps
+
+ if workflow_op and not arvargs.job_order:
job_order_object = ({}, "")
add_arv_hints()
@@ -313,14 +340,32 @@ def main(args=sys.argv[1:],
try:
if api_client is None:
api_client = arvados.safeapi.ThreadSafeApiCache(
- api_params={"model": OrderedJsonModel(), "timeout": arvargs.http_timeout},
- keep_params={"num_retries": 4})
+ api_params={
+ 'num_retries': arvargs.retries,
+ 'timeout': arvargs.http_timeout,
+ },
+ keep_params={
+ 'num_retries': arvargs.retries,
+ },
+ version='v1',
+ )
keep_client = api_client.keep
# Make an API object now so errors are reported early.
api_client.users().current().execute()
if keep_client is None:
- keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
- executor = ArvCwlExecutor(api_client, arvargs, keep_client=keep_client, num_retries=4, stdout=stdout)
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True)
+ keep_client = arvados.keep.KeepClient(
+ api_client=api_client,
+ block_cache=block_cache,
+ num_retries=arvargs.retries,
+ )
+ executor = ArvCwlExecutor(
+ api_client,
+ arvargs,
+ keep_client=keep_client,
+ num_retries=arvargs.retries,
+ stdout=stdout,
+ )
except WorkflowException as e:
logger.error(e, exc_info=(sys.exc_info()[1] if arvargs.debug else False))
return 1
@@ -330,9 +375,25 @@ def main(args=sys.argv[1:],
# Note that unless in debug mode, some stack traces related to user
# workflow errors may be suppressed.
+
+ # Set the logging on most modules INFO (instead of default which is WARNING)
+ logger.setLevel(logging.INFO)
+ logging.getLogger('arvados').setLevel(logging.INFO)
+ logging.getLogger('arvados.keep').setLevel(logging.WARNING)
+ # API retries are filtered to the INFO level and can be noisy, but as long as
+ # they succeed we don't need to see warnings about it.
+ googleapiclient_http_logger = logging.getLogger('googleapiclient.http')
+ googleapiclient_http_logger.addFilter(arvados.logging.GoogleHTTPClientFilter())
+ googleapiclient_http_logger.setLevel(logging.WARNING)
+
if arvargs.debug:
logger.setLevel(logging.DEBUG)
logging.getLogger('arvados').setLevel(logging.DEBUG)
+        # In debug mode show logs about retries, but we aren't
+ # debugging the google client so we don't need to see
+ # everything.
+ googleapiclient_http_logger.setLevel(logging.NOTSET)
+ logging.getLogger('googleapiclient').setLevel(logging.INFO)
if arvargs.quiet:
logger.setLevel(logging.WARN)
@@ -359,6 +420,13 @@ def main(args=sys.argv[1:],
# unit tests.
stdout = None
+ executor.loadingContext.default_docker_image = arvargs.submit_runner_image or "arvados/jobs:"+__version__
+
+ if arvargs.workflow.startswith("arvwf:") or workflow_uuid_pattern.match(arvargs.workflow) or arvargs.workflow.startswith("keep:"):
+ executor.loadingContext.do_validate = False
+ if arvargs.submit and not workflow_op:
+ executor.fast_submit = True
+
return cwltool.main.main(args=arvargs,
stdout=stdout,
stderr=stderr,
@@ -369,4 +437,4 @@ def main(args=sys.argv[1:],
custom_schema_callback=add_arv_hints,
loadingContext=executor.loadingContext,
runtimeContext=executor.toplevel_runtimeContext,
- input_required=not (arvargs.create_workflow or arvargs.update_workflow))
+ input_required=not workflow_op)
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
index 54e0fc5122..aeb41db568 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
@@ -420,3 +420,71 @@ $graph:
jsonldPredicate:
mapSubject: propertyName
mapPredicate: propertyValue
+
+
+- name: KeepCacheType
+ type: enum
+ symbols:
+ - ram_cache
+ - disk_cache
+ doc:
+ - |
+ ram_cache: Keep blocks will be cached in RAM only.
+ - |
+ disk_cache: Keep blocks will be cached to disk and
+ memory-mapped. The disk cache leverages the kernel's virtual
+ memory system so "hot" data will generally still be kept in
+ RAM.
+
+- name: KeepCacheTypeRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Choose keep cache strategy.
+ fields:
+ - name: class
+ type: string
+ doc: "'arv:KeepCacheTypeRequirement'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: keepCacheType
+ type: KeepCacheType?
+ doc: |
+ Whether Keep blocks loaded by arv-mount should be kept in RAM
+ only or written to disk and memory-mapped. The disk cache
+ leverages the kernel's virtual memory system so "hot" data will
+ generally still be kept in RAM.
+
+- name: OutOfMemoryRetry
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Detect when a failed tool run may have run out of memory, and
+ re-submit the container with more RAM.
+ fields:
+ - name: class
+ type: string
+      doc: "'arv:OutOfMemoryRetry'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: memoryErrorRegex
+ type: string?
+ doc: |
+ A regular expression that will be used on the text of stdout
+ and stderr produced by the tool to determine if a failed job
+ should be retried with more RAM. By default, searches for the
+ substrings 'bad_alloc' and 'OutOfMemory'.
+ - name: memoryRetryMultiplier
+ type: float?
+ doc: |
+ If the container failed on its first run, re-submit the
+ container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+        for backwards compatibility, don't use this.
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
index b60d0ab1c9..0e51d50080 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
@@ -363,3 +363,71 @@ $graph:
jsonldPredicate:
mapSubject: propertyName
mapPredicate: propertyValue
+
+
+- name: KeepCacheType
+ type: enum
+ symbols:
+ - ram_cache
+ - disk_cache
+ doc:
+ - |
+ ram_cache: Keep blocks will be cached in RAM only.
+ - |
+ disk_cache: Keep blocks will be cached to disk and
+ memory-mapped. The disk cache leverages the kernel's virtual
+ memory system so "hot" data will generally still be kept in
+ RAM.
+
+- name: KeepCacheTypeRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Choose keep cache strategy.
+ fields:
+ - name: class
+ type: string
+ doc: "'arv:KeepCacheTypeRequirement'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: keepCacheType
+ type: KeepCacheType?
+ doc: |
+ Whether Keep blocks loaded by arv-mount should be kept in RAM
+ only or written to disk and memory-mapped. The disk cache
+ leverages the kernel's virtual memory system so "hot" data will
+ generally still be kept in RAM.
+
+- name: OutOfMemoryRetry
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Detect when a failed tool run may have run out of memory, and
+ re-submit the container with more RAM.
+ fields:
+ - name: class
+ type: string
+      doc: "'arv:OutOfMemoryRetry'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: memoryErrorRegex
+ type: string?
+ doc: |
+ A regular expression that will be used on the text of stdout
+ and stderr produced by the tool to determine if a failed job
+ should be retried with more RAM. By default, searches for the
+ substrings 'bad_alloc' and 'OutOfMemory'.
+ - name: memoryRetryMultiplier
+ type: float?
+ doc: |
+ If the container failed on its first run, re-submit the
+ container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+        for backwards compatibility, don't use this.
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
index 2769244a5d..a753579c9a 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
@@ -365,3 +365,92 @@ $graph:
jsonldPredicate:
mapSubject: propertyName
mapPredicate: propertyValue
+
+
+- name: KeepCacheType
+ type: enum
+ symbols:
+ - ram_cache
+ - disk_cache
+ doc:
+ - |
+ ram_cache: Keep blocks will be cached in RAM only.
+ - |
+ disk_cache: Keep blocks will be cached to disk and
+ memory-mapped. The disk cache leverages the kernel's virtual
+ memory system so "hot" data will generally still be kept in
+ RAM.
+
+- name: KeepCacheTypeRequirement
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Choose keep cache strategy.
+ fields:
+ - name: class
+ type: string
+ doc: "'arv:KeepCacheTypeRequirement'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: keepCacheType
+ type: KeepCacheType?
+ doc: |
+ Whether Keep blocks loaded by arv-mount should be kept in RAM
+ only or written to disk and memory-mapped. The disk cache
+ leverages the kernel's virtual memory system so "hot" data will
+ generally still be kept in RAM.
+
+
+- name: OutOfMemoryRetry
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Detect when a failed tool run may have run out of memory, and
+ re-submit the container with more RAM.
+ fields:
+ - name: class
+ type: string
+      doc: "'arv:OutOfMemoryRetry'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: memoryErrorRegex
+ type: string?
+ doc: |
+ A regular expression that will be used on the text of stdout
+ and stderr produced by the tool to determine if a failed job
+ should be retried with more RAM. By default, searches for the
+ substrings 'bad_alloc' and 'OutOfMemory'.
+ - name: memoryRetryMultiplier
+ type: float?
+ doc: |
+ If the container failed on its first run, re-submit the
+ container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+        for backwards compatibility, don't use this.
+
+
+- name: SeparateRunner
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Indicates that a subworkflow should run in a separate
+ arvados-cwl-runner process.
+ fields:
+ - name: class
+ type: string
+ doc: "Always 'arv:SeparateRunner'"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ - name: runnerProcessName
+ type: ['null', string, cwl:Expression]
+ doc: |
+ Custom name to use for the runner process
diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index 5094ea3bf1..c3b914ba99 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -15,9 +15,10 @@ import datetime
import ciso8601
import uuid
import math
+import re
import arvados_cwl.util
-import ruamel.yaml as yaml
+import ruamel.yaml
from cwltool.errors import WorkflowException
from cwltool.process import UnsupportedRequirement, shortname
@@ -26,6 +27,9 @@ from cwltool.job import JobBase
import arvados.collection
+import crunchstat_summary.summarizer
+import crunchstat_summary.reader
+
from .arvdocker import arv_docker_get_image
from . import done
from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields, make_builder
@@ -56,6 +60,7 @@ class ArvadosContainer(JobBase):
self.job_runtime = job_runtime
self.running = False
self.uuid = None
+ self.attempt_count = 0
def update_pipeline_component(self, r):
pass
@@ -88,9 +93,11 @@ class ArvadosContainer(JobBase):
container_request["output_path"] = self.outdir
container_request["cwd"] = self.outdir
container_request["priority"] = runtimeContext.priority
- container_request["state"] = "Committed"
+ container_request["state"] = "Uncommitted"
container_request.setdefault("properties", {})
+ container_request["properties"]["cwl_input"] = self.joborder
+
runtime_constraints = {}
if runtimeContext.project_uuid:
@@ -248,11 +255,7 @@ class ArvadosContainer(JobBase):
container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
docker_req,
runtimeContext.pull_image,
- runtimeContext.project_uuid,
- runtimeContext.force_docker_pull,
- runtimeContext.tmp_outdir_prefix,
- runtimeContext.match_local_docker,
- runtimeContext.copy_deps)
+ runtimeContext)
network_req, _ = self.get_requirement("NetworkAccess")
if network_req:
@@ -262,10 +265,22 @@ class ArvadosContainer(JobBase):
if api_req:
runtime_constraints["API"] = True
+ use_disk_cache = (self.arvrunner.api.config()["Containers"].get("DefaultKeepCacheRAM", 0) == 0)
+
+ keep_cache_type_req, _ = self.get_requirement("http://arvados.org/cwl#KeepCacheTypeRequirement")
+ if keep_cache_type_req:
+ if "keepCacheType" in keep_cache_type_req:
+ if keep_cache_type_req["keepCacheType"] == "ram_cache":
+ use_disk_cache = False
+
runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
if runtime_req:
if "keep_cache" in runtime_req:
- runtime_constraints["keep_cache_ram"] = math.ceil(runtime_req["keep_cache"] * 2**20)
+ if use_disk_cache:
+ # If DefaultKeepCacheRAM is zero it means we should use disk cache.
+ runtime_constraints["keep_cache_disk"] = math.ceil(runtime_req["keep_cache"] * 2**20)
+ else:
+ runtime_constraints["keep_cache_ram"] = math.ceil(runtime_req["keep_cache"] * 2**20)
if "outputDirType" in runtime_req:
if runtime_req["outputDirType"] == "local_output_dir":
# Currently the default behavior.
@@ -355,6 +370,17 @@ class ArvadosContainer(JobBase):
logger.warning("%s API revision is %s, revision %s is required to support setting properties on output collections.",
self.arvrunner.label(self), self.arvrunner.api._rootDesc["revision"], "20220510")
+ ram_multiplier = [1]
+
+ oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
+ if oom_retry_req:
+ if oom_retry_req.get('memoryRetryMultiplier'):
+ ram_multiplier.append(oom_retry_req.get('memoryRetryMultiplier'))
+ elif oom_retry_req.get('memoryRetryMultipler'):
+ ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+ else:
+ ram_multiplier.append(2)
+
if runtimeContext.runnerjob.startswith("arvwf:"):
wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
@@ -362,23 +388,45 @@ class ArvadosContainer(JobBase):
container_request["name"] = wfrecord["name"]
container_request["properties"]["template_uuid"] = wfuuid
- self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+ if self.attempt_count == 0:
+ self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
try:
- if runtimeContext.submit_request_uuid:
- response = self.arvrunner.api.container_requests().update(
- uuid=runtimeContext.submit_request_uuid,
- body=container_request,
- **extra_submit_params
- ).execute(num_retries=self.arvrunner.num_retries)
- else:
- response = self.arvrunner.api.container_requests().create(
- body=container_request,
- **extra_submit_params
- ).execute(num_retries=self.arvrunner.num_retries)
+ ram = runtime_constraints["ram"]
+
+ self.uuid = runtimeContext.submit_request_uuid
+
+ for i in ram_multiplier:
+ runtime_constraints["ram"] = ram * i
+
+ if self.uuid:
+ response = self.arvrunner.api.container_requests().update(
+ uuid=self.uuid,
+ body=container_request,
+ **extra_submit_params
+ ).execute(num_retries=self.arvrunner.num_retries)
+ else:
+ response = self.arvrunner.api.container_requests().create(
+ body=container_request,
+ **extra_submit_params
+ ).execute(num_retries=self.arvrunner.num_retries)
+ self.uuid = response["uuid"]
+
+ if response["container_uuid"] is not None:
+ break
+
+ if response["container_uuid"] is None:
+ runtime_constraints["ram"] = ram * ram_multiplier[self.attempt_count]
+
+ container_request["state"] = "Committed"
+ response = self.arvrunner.api.container_requests().update(
+ uuid=self.uuid,
+ body=container_request,
+ **extra_submit_params
+ ).execute(num_retries=self.arvrunner.num_retries)
- self.uuid = response["uuid"]
self.arvrunner.process_submitted(self)
+ self.attempt_count += 1
if response["state"] == "Final":
logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
@@ -389,8 +437,37 @@ class ArvadosContainer(JobBase):
logger.debug("Container request was %s", container_request)
self.output_callback({}, "permanentFail")
+ def out_of_memory_retry(self, record, container):
+ oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
+ if oom_retry_req is None:
+ return False
+
+ # Sometimes it gets killed with no warning
+ if container["exit_code"] == 137:
+ return True
+
+ logc = arvados.collection.CollectionReader(record["log_uuid"],
+ api_client=self.arvrunner.api,
+ keep_client=self.arvrunner.keep_client,
+ num_retries=self.arvrunner.num_retries)
+
+ loglines = [""]
+ def callback(v1, v2, v3):
+ loglines[0] = v3
+
+ done.logtail(logc, callback, "", maxlen=1000)
+
+ # Check allocation failure
+ oom_matches = oom_retry_req.get('memoryErrorRegex') or r'(bad_alloc|out ?of ?memory|memory ?error|container using over 9.% of memory)'
+ if re.search(oom_matches, loglines[0], re.IGNORECASE | re.MULTILINE):
+ return True
+
+ return False
+
def done(self, record):
outputs = {}
+ retried = False
+ rcode = None
try:
container = self.arvrunner.api.containers().get(
uuid=record["container_uuid"]
@@ -408,21 +485,33 @@ class ArvadosContainer(JobBase):
else:
processStatus = "permanentFail"
+ if processStatus == "permanentFail" and self.attempt_count == 1 and self.out_of_memory_retry(record, container):
+ logger.warning("%s Container failed with out of memory error, retrying with more RAM.",
+ self.arvrunner.label(self))
+ self.job_runtime.submit_request_uuid = None
+ self.uuid = None
+ self.run(None)
+ retried = True
+ return
+
if rcode == 137:
- logger.warning("%s Container may have been killed for using too much RAM. Try resubmitting with a higher 'ramMin'.",
+ logger.warning("%s Container may have been killed for using too much RAM. Try resubmitting with a higher 'ramMin' or use the arv:OutOfMemoryRetry feature.",
self.arvrunner.label(self))
else:
processStatus = "permanentFail"
- if processStatus == "permanentFail" and record["log_uuid"]:
- logc = arvados.collection.CollectionReader(record["log_uuid"],
- api_client=self.arvrunner.api,
- keep_client=self.arvrunner.keep_client,
- num_retries=self.arvrunner.num_retries)
+ logc = None
+ if record["log_uuid"]:
+ logc = arvados.collection.Collection(record["log_uuid"],
+ api_client=self.arvrunner.api,
+ keep_client=self.arvrunner.keep_client,
+ num_retries=self.arvrunner.num_retries)
+
+ if processStatus == "permanentFail" and logc is not None:
label = self.arvrunner.label(self)
done.logtail(
logc, logger.error,
- "%s (%s) error log:" % (label, record["uuid"]), maxlen=40)
+ "%s (%s) error log:" % (label, record["uuid"]), maxlen=40, include_crunchrun=(rcode is None or rcode > 127))
if record["output_uuid"]:
if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:
@@ -437,6 +526,35 @@ class ArvadosContainer(JobBase):
if container["output"]:
outputs = done.done_outputs(self, container, "/tmp", self.outdir, "/keep")
+
+ properties = record["properties"].copy()
+ properties["cwl_output"] = outputs
+ self.arvrunner.api.container_requests().update(
+ uuid=self.uuid,
+ body={"container_request": {"properties": properties}}
+ ).execute(num_retries=self.arvrunner.num_retries)
+
+ if logc is not None and self.job_runtime.enable_usage_report is not False:
+ try:
+ summarizer = crunchstat_summary.summarizer.ContainerRequestSummarizer(
+ record,
+ collection_object=logc,
+ label=self.name,
+ arv=self.arvrunner.api)
+ summarizer.run()
+ with logc.open("usage_report.html", "wt") as mr:
+ mr.write(summarizer.html_report())
+ logc.save()
+
+ # Post warnings about nodes that are under-utilized.
+ for rc in summarizer._recommend_gen(lambda x: x):
+ self.job_runtime.usage_report_notes.append(rc)
+
+ except Exception as e:
+ logger.warning("%s unable to generate resource usage report",
+ self.arvrunner.label(self),
+ exc_info=(e if self.arvrunner.debug else False))
+
except WorkflowException as e:
# Only include a stack trace if in debug mode.
# A stack trace may obfuscate more useful output about the workflow.
@@ -447,13 +565,14 @@ class ArvadosContainer(JobBase):
logger.exception("%s while getting output object:", self.arvrunner.label(self))
processStatus = "permanentFail"
finally:
- self.output_callback(outputs, processStatus)
+ if not retried:
+ self.output_callback(outputs, processStatus)
class RunnerContainer(Runner):
"""Submit and manage a container that runs arvados-cwl-runner."""
- def arvados_job_spec(self, runtimeContext):
+ def arvados_job_spec(self, runtimeContext, git_info):
"""Create an Arvados container request for this workflow.
The returned dict can be used to create a container passed as
@@ -474,13 +593,19 @@ class RunnerContainer(Runner):
}
self.job_order[param] = {"$include": mnt}
+ container_image = arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext)
+
+ workflow_runner_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+ if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+ container_image = workflow_runner_req.get("acrContainerImage")
+
container_req = {
"name": self.name,
"output_path": "/var/spool/cwl",
"cwd": "/var/spool/cwl",
"priority": self.priority,
"state": "Committed",
- "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext),
+ "container_image": container_image,
"mounts": {
"/var/lib/cwl/cwl.input.json": {
"kind": "json",
@@ -501,7 +626,7 @@ class RunnerContainer(Runner):
"ram": 1024*1024 * (math.ceil(self.submit_runner_ram) + math.ceil(self.collection_cache_size)),
"API": True
},
- "use_existing": False, # Never reuse the runner container - see #15497.
+ "use_existing": self.reuse_runner,
"properties": {}
}
@@ -514,15 +639,30 @@ class RunnerContainer(Runner):
"kind": "collection",
"portable_data_hash": "%s" % workflowcollection
}
+ elif self.embedded_tool.tool.get("id", "").startswith("arvwf:"):
+ uuid, frg = urllib.parse.urldefrag(self.embedded_tool.tool["id"])
+ workflowpath = "/var/lib/cwl/workflow.json#" + frg
+ packedtxt = self.loadingContext.loader.fetch_text(uuid)
+ yaml = ruamel.yaml.YAML(typ='safe', pure=True)
+ packed = yaml.load(packedtxt)
+ container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
+ "kind": "json",
+ "content": packed
+ }
+ container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
+ elif self.embedded_tool.tool.get("id", "").startswith("file:"):
+ raise WorkflowException("Tool id '%s' is a local file but expected keep: or arvwf:" % self.embedded_tool.tool.get("id"))
else:
- packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext)
+ main = self.loadingContext.loader.idx["_:main"]
+ if main.get("id") == "_:main":
+ del main["id"]
workflowpath = "/var/lib/cwl/workflow.json#main"
container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
"kind": "json",
- "content": packed
+ "content": main
}
- if self.embedded_tool.tool.get("id", "").startswith("arvwf:"):
- container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
+
+ container_req["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in git_info.items()})
properties_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#ProcessProperties")
if properties_req:
@@ -586,6 +726,21 @@ class RunnerContainer(Runner):
if runtimeContext.enable_preemptible is False:
command.append("--disable-preemptible")
+ if runtimeContext.varying_url_params:
+ command.append("--varying-url-params="+runtimeContext.varying_url_params)
+
+ if runtimeContext.prefer_cached_downloads:
+ command.append("--prefer-cached-downloads")
+
+ if runtimeContext.enable_usage_report is True:
+ command.append("--enable-usage-report")
+
+ if runtimeContext.enable_usage_report is False:
+ command.append("--disable-usage-report")
+
+ if self.fast_parser:
+ command.append("--fast-parser")
+
command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
container_req["command"] = command
@@ -595,7 +750,7 @@ class RunnerContainer(Runner):
def run(self, runtimeContext):
runtimeContext.keepprefix = "keep:"
- job_spec = self.arvados_job_spec(runtimeContext)
+ job_spec = self.arvados_job_spec(runtimeContext, self.git_info)
if runtimeContext.project_uuid:
job_spec["owner_uuid"] = runtimeContext.project_uuid
@@ -623,14 +778,9 @@ class RunnerContainer(Runner):
logger.info("%s submitted container_request %s", self.arvrunner.label(self), response["uuid"])
- workbench1 = self.arvrunner.api.config()["Services"]["Workbench1"]["ExternalURL"]
workbench2 = self.arvrunner.api.config()["Services"]["Workbench2"]["ExternalURL"]
- url = ""
if workbench2:
url = "{}processes/{}".format(workbench2, response["uuid"])
- elif workbench1:
- url = "{}container_requests/{}".format(workbench1, response["uuid"])
- if url:
logger.info("Monitor workflow progress at %s", url)
diff --git a/sdk/cwl/arvados_cwl/arvdocker.py b/sdk/cwl/arvados_cwl/arvdocker.py
index cf0b3b9daf..f5e67a6649 100644
--- a/sdk/cwl/arvados_cwl/arvdocker.py
+++ b/sdk/cwl/arvados_cwl/arvdocker.py
@@ -17,9 +17,6 @@ import arvados.commands.keepdocker
logger = logging.getLogger('arvados.cwl-runner')
-cached_lookups = {}
-cached_lookups_lock = threading.Lock()
-
def determine_image_id(dockerImageId):
for line in (
subprocess.check_output( # nosec
@@ -56,10 +53,16 @@ def determine_image_id(dockerImageId):
return None
-def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid,
- force_pull, tmp_outdir_prefix, match_local_docker, copy_deps):
+def arv_docker_get_image(api_client, dockerRequirement, pull_image, runtimeContext):
"""Check if a Docker image is available in Keep, if not, upload it using arv-keepdocker."""
+ project_uuid = runtimeContext.project_uuid
+ force_pull = runtimeContext.force_docker_pull
+ tmp_outdir_prefix = runtimeContext.tmp_outdir_prefix
+ match_local_docker = runtimeContext.match_local_docker
+ copy_deps = runtimeContext.copy_deps
+ cached_lookups = runtimeContext.cached_docker_lookups
+
if "http://arvados.org/cwl#dockerCollectionPDH" in dockerRequirement:
return dockerRequirement["http://arvados.org/cwl#dockerCollectionPDH"]
@@ -69,11 +72,8 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid
if hasattr(dockerRequirement, 'lc'):
dockerRequirement.lc.data["dockerImageId"] = dockerRequirement.lc.data["dockerPull"]
- global cached_lookups
- global cached_lookups_lock
- with cached_lookups_lock:
- if dockerRequirement["dockerImageId"] in cached_lookups:
- return cached_lookups[dockerRequirement["dockerImageId"]]
+ if dockerRequirement["dockerImageId"] in cached_lookups:
+ return cached_lookups[dockerRequirement["dockerImageId"]]
with SourceLine(dockerRequirement, "dockerImageId", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
sp = dockerRequirement["dockerImageId"].split(":")
@@ -121,7 +121,8 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid
if not out_of_project_images:
# Fetch Docker image if necessary.
try:
- result = cwltool.docker.DockerCommandLineJob.get_image(dockerRequirement, pull_image,
+ dockerjob = cwltool.docker.DockerCommandLineJob(None, None, None, None, None, None)
+ result = dockerjob.get_image(dockerRequirement, pull_image,
force_pull, tmp_outdir_prefix)
if not result:
raise WorkflowException("Docker image '%s' not available" % dockerRequirement["dockerImageId"])
@@ -153,13 +154,6 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid
pdh = api_client.collections().get(uuid=images[0][0]).execute()["portable_data_hash"]
- with cached_lookups_lock:
- cached_lookups[dockerRequirement["dockerImageId"]] = pdh
+ cached_lookups[dockerRequirement["dockerImageId"]] = pdh
return pdh
-
-def arv_docker_clear_cache():
- global cached_lookups
- global cached_lookups_lock
- with cached_lookups_lock:
- cached_lookups = {}
diff --git a/sdk/cwl/arvados_cwl/arvtool.py b/sdk/cwl/arvados_cwl/arvtool.py
index b66e8ad3aa..86fecc0a1d 100644
--- a/sdk/cwl/arvados_cwl/arvtool.py
+++ b/sdk/cwl/arvados_cwl/arvtool.py
@@ -10,6 +10,7 @@ from ._version import __version__
from functools import partial
from schema_salad.sourceline import SourceLine
from cwltool.errors import WorkflowException
+from arvados.util import portable_data_hash_pattern
def validate_cluster_target(arvrunner, runtimeContext):
if (runtimeContext.submit_runner_cluster and
@@ -61,8 +62,12 @@ class ArvadosCommandTool(CommandLineTool):
(docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
if not docker_req:
- self.hints.append({"class": "DockerRequirement",
- "dockerPull": "arvados/jobs:"+__version__})
+ if portable_data_hash_pattern.match(loadingContext.default_docker_image):
+ self.hints.append({"class": "DockerRequirement",
+ "http://arvados.org/cwl#dockerCollectionPDH": loadingContext.default_docker_image})
+ else:
+ self.hints.append({"class": "DockerRequirement",
+ "dockerPull": loadingContext.default_docker_image})
self.arvrunner = arvrunner
diff --git a/sdk/cwl/arvados_cwl/arvworkflow.py b/sdk/cwl/arvados_cwl/arvworkflow.py
index 51e7cd8b9e..c592b83dc7 100644
--- a/sdk/cwl/arvados_cwl/arvworkflow.py
+++ b/sdk/cwl/arvados_cwl/arvworkflow.py
@@ -9,25 +9,41 @@ import os
import json
import copy
import logging
+import urllib
+from io import StringIO
+import sys
+import re
+
+from typing import (MutableSequence, MutableMapping)
+
+from ruamel.yaml import YAML
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.sourceline import SourceLine, cmap
import schema_salad.ref_resolver
+import arvados.collection
+
from cwltool.pack import pack
from cwltool.load_tool import fetch_document, resolve_and_validate_document
-from cwltool.process import shortname
+from cwltool.process import shortname, uniquename
from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
from cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs
-from cwltool.context import LoadingContext
+from cwltool.context import LoadingContext, getdefault
+
+from schema_salad.ref_resolver import file_uri, uri_file_path
import ruamel.yaml as yaml
from .runner import (upload_dependencies, packed_workflow, upload_workflow_collection,
trim_anonymous_location, remove_redundant_fields, discover_secondary_files,
- make_builder, arvados_jobs_image)
+ make_builder, arvados_jobs_image, FileUpdates)
+from .arvcontainer import RunnerContainer
from .pathmapper import ArvPathMapper, trim_listing
from .arvtool import ArvadosCommandTool, set_cluster_target
from ._version import __version__
+from .util import common_prefix
+from .arvdocker import arv_docker_get_image
from .perf import Perf
@@ -37,29 +53,359 @@ metrics = logging.getLogger('arvados.cwl-runner.metrics')
max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
sum_res_pars = ("outdirMin", "outdirMax")
-def upload_workflow(arvRunner, tool, job_order, project_uuid,
- runtimeContext, uuid=None,
- submit_runner_ram=0, name=None, merged_map=None,
- submit_runner_image=None):
+_basetype_re = re.compile(r'''(?:
+Directory
+|File
+|array
+|boolean
+|double
+|enum
+|float
+|int
+|long
+|null
+|record
+|string
+)(?:\[\])?\??''', re.VERBOSE)
+
+def make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):
+ col = arvados.collection.Collection(api_client=arvRunner.api,
+ keep_client=arvRunner.keep_client)
+
+ with col.open("workflow.json", "wt") as f:
+ json.dump(packed, f, sort_keys=True, indent=4, separators=(',',': '))
+
+ pdh = col.portable_data_hash()
+
+ toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+ if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
+ toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
+
+ existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", pdh], ["owner_uuid", "=", project_uuid]]).execute(num_retries=arvRunner.num_retries)
+ if len(existing["items"]) == 0:
+ col.save_new(name=toolname, owner_uuid=project_uuid, ensure_unique_name=True)
+
+ # now construct the wrapper
+
+ step = {
+ "id": "#main/" + toolname,
+ "in": [],
+ "out": [],
+ "run": "keep:%s/workflow.json#main" % pdh,
+ "label": name
+ }
+
+ newinputs = []
+ for i in main["inputs"]:
+ inp = {}
+ # Make sure to only copy known fields that are meaningful at
+ # the workflow level. In practice this ensures that if we're
+ # wrapping a CommandLineTool we don't grab inputBinding.
+ # Right now also excludes extension fields, which is fine,
+ # Arvados doesn't currently look for any extension fields on
+ # input parameters.
+ for f in ("type", "label", "secondaryFiles", "streamable",
+ "doc", "id", "format", "loadContents",
+ "loadListing", "default"):
+ if f in i:
+ inp[f] = i[f]
+ newinputs.append(inp)
+
+ wrapper = {
+ "class": "Workflow",
+ "id": "#main",
+ "inputs": newinputs,
+ "outputs": [],
+ "steps": [step]
+ }
+
+ for i in main["inputs"]:
+ step["in"].append({
+ "id": "#main/step/%s" % shortname(i["id"]),
+ "source": i["id"]
+ })
- packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext)
+ for i in main["outputs"]:
+ step["out"].append({"id": "#main/step/%s" % shortname(i["id"])})
+ wrapper["outputs"].append({"outputSource": "#main/step/%s" % shortname(i["id"]),
+ "type": i["type"],
+ "id": i["id"]})
+
+ wrapper["requirements"] = [{"class": "SubworkflowFeatureRequirement"}]
+
+ if main.get("requirements"):
+ wrapper["requirements"].extend(main["requirements"])
+ if main.get("hints"):
+ wrapper["hints"] = main["hints"]
+
+ doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
+
+ if git_info:
+ for g in git_info:
+ doc[g] = git_info[g]
- adjustDirObjs(job_order, trim_listing)
- adjustFileObjs(job_order, trim_anonymous_location)
- adjustDirObjs(job_order, trim_anonymous_location)
+ return json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))
+
+
+def rel_ref(s, baseuri, urlexpander, merged_map, jobmapper):
+ if s.startswith("keep:") or s.startswith("arvwf:"):
+ return s
+
+ uri = urlexpander(s, baseuri)
+
+ if uri.startswith("keep:"):
+ return uri
+
+ fileuri = urllib.parse.urldefrag(baseuri)[0]
+
+ for u in (baseuri, fileuri):
+ if u in merged_map:
+ replacements = merged_map[u].resolved
+ if uri in replacements:
+ return replacements[uri]
+
+ if uri in jobmapper:
+ return jobmapper.mapper(uri).target
+
+ p1 = os.path.dirname(uri_file_path(fileuri))
+ p2 = os.path.dirname(uri_file_path(uri))
+ p3 = os.path.basename(uri_file_path(uri))
+
+ r = os.path.relpath(p2, p1)
+ if r == ".":
+ r = ""
+
+ return os.path.join(r, p3)
+
+def is_basetype(tp):
+ return _basetype_re.match(tp) is not None
+
+def update_refs(api, d, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix):
+ if isinstance(d, MutableSequence):
+ for i, s in enumerate(d):
+ if prefix and isinstance(s, str):
+ if s.startswith(prefix):
+ d[i] = replacePrefix+s[len(prefix):]
+ else:
+ update_refs(api, s, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
+ elif isinstance(d, MutableMapping):
+ for field in ("id", "name"):
+ if isinstance(d.get(field), str) and d[field].startswith("_:"):
+ # blank node reference, was added in automatically, can get rid of it.
+ del d[field]
+
+ if "id" in d:
+ baseuri = urlexpander(d["id"], baseuri, scoped_id=True)
+ elif "name" in d and isinstance(d["name"], str):
+ baseuri = urlexpander(d["name"], baseuri, scoped_id=True)
+
+ if d.get("class") == "DockerRequirement":
+ d["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, d, False,
+ runtimeContext)
+
+ for field in d:
+ if field in ("location", "run", "name") and isinstance(d[field], str):
+ d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map, jobmapper)
+ continue
- main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
- for inp in main["inputs"]:
- sn = shortname(inp["id"])
- if sn in job_order:
- inp["default"] = job_order[sn]
+ if field in ("$include", "$import") and isinstance(d[field], str):
+ d[field] = rel_ref(d[field], baseuri, urlexpander, {}, jobmapper)
+ continue
- if not name:
- name = tool.tool.get("label", os.path.basename(tool.tool["id"]))
+ for t in ("type", "items"):
+ if (field == t and
+ isinstance(d[t], str) and
+ not is_basetype(d[t])):
+ d[t] = rel_ref(d[t], baseuri, urlexpander, merged_map, jobmapper)
+ continue
- upload_dependencies(arvRunner, name, tool.doc_loader,
- packed, tool.tool["id"], False,
- runtimeContext)
+ if field == "inputs" and isinstance(d["inputs"], MutableMapping):
+ for inp in d["inputs"]:
+ if isinstance(d["inputs"][inp], str) and not is_basetype(d["inputs"][inp]):
+ d["inputs"][inp] = rel_ref(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper)
+ if isinstance(d["inputs"][inp], MutableMapping):
+ update_refs(api, d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
+ continue
+
+ if field in ("requirements", "hints") and isinstance(d[field], MutableMapping):
+ dr = d[field].get("DockerRequirement")
+ if dr:
+ dr["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, dr, False,
+ runtimeContext)
+
+ if field == "$schemas":
+ for n, s in enumerate(d["$schemas"]):
+ d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map, jobmapper)
+ continue
+
+ update_refs(api, d[field], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
+
+
+def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
+ req = copy.deepcopy(req)
+
+ for f in req["types"]:
+ r = f["name"]
+ path, frag = urllib.parse.urldefrag(r)
+ rel = rel_ref(r, baseuri, urlexpander, merged_map, jobmapper)
+ merged_map.setdefault(path, FileUpdates({}, {}))
+ rename = "keep:%s/%s" %(pdh, rel)
+ for mm in merged_map:
+ merged_map[mm].resolved[r] = rename
+ return req
+
+
+def drop_ids(d):
+ if isinstance(d, MutableSequence):
+ for i, s in enumerate(d):
+ drop_ids(s)
+ elif isinstance(d, MutableMapping):
+ if "id" in d and d["id"].startswith("file:"):
+ del d["id"]
+
+ for field in d:
+ drop_ids(d[field])
+
+
+def upload_workflow(arvRunner, tool, job_order, project_uuid,
+ runtimeContext,
+ uuid=None,
+ submit_runner_ram=0, name=None, merged_map=None,
+ submit_runner_image=None,
+ git_info=None,
+ set_defaults=False,
+ jobmapper=None):
+
+ firstfile = None
+ workflow_files = set()
+ import_files = set()
+ include_files = set()
+
+ # The document loader index will have entries for all the files
+ # that were loaded in the process of parsing the entire workflow
+ # (including subworkflows, tools, imports, etc). We use this to
+ # compose a list of the workflow file dependencies.
+ for w in tool.doc_loader.idx:
+ if w.startswith("file://"):
+ workflow_files.add(urllib.parse.urldefrag(w)[0])
+ if firstfile is None:
+ firstfile = urllib.parse.urldefrag(w)[0]
+ if w.startswith("import:file://"):
+ import_files.add(urllib.parse.urldefrag(w[7:])[0])
+ if w.startswith("include:file://"):
+ include_files.add(urllib.parse.urldefrag(w[8:])[0])
+
+ all_files = workflow_files | import_files | include_files
+
+ # Find the longest common prefix among all the file names. We'll
+ # use this to recreate the directory structure in a keep
+ # collection with correct relative references.
+ prefix = common_prefix(firstfile, all_files) if firstfile else ""
+
+
+ col = arvados.collection.Collection(api_client=arvRunner.api)
+
+ # Now go through all the files and update references to other
+ # files. We previously scanned for file dependencies, these are
+ # passed in as merged_map.
+ #
+ # note about merged_map: we upload dependencies of each process
+ # object (CommandLineTool/Workflow) to a separate collection.
+ # That way, when the user edits something, this limits collection
+ # PDH changes to just that tool, and minimizes situations where
+ # small changes break container reuse for the whole workflow.
+ #
+ for w in workflow_files | import_files:
+ # 1. load the YAML file
+
+ text = tool.doc_loader.fetch_text(w)
+ if isinstance(text, bytes):
+ textIO = StringIO(text.decode('utf-8'))
+ else:
+ textIO = StringIO(text)
+
+ yamlloader = schema_salad.utils.yaml_no_ts()
+ result = yamlloader.load(textIO)
+
+ # If the whole document is in "flow style" it is probably JSON
+ # formatted. We'll re-export it as JSON because the
+ # ruamel.yaml round-trip mode is a lie and only preserves
+ # "block style" formatting and not "flow style" formatting.
+ export_as_json = result.fa.flow_style()
+
+ # 2. find $import, $include, $schema, run, location
+ # 3. update field value
+ update_refs(arvRunner.api, result, w, tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, "", "")
+
+ # Write the updated file to the collection.
+ with col.open(w[len(prefix):], "wt") as f:
+ if export_as_json:
+ json.dump(result, f, indent=4, separators=(',',': '))
+ else:
+ yamlloader.dump(result, stream=f)
+
+ # Also store a verbatim copy of the original files
+ with col.open(os.path.join("original", w[len(prefix):]), "wt") as f:
+ f.write(text)
+
+
+ # Upload files referenced by $include directives, these are used
+ # unchanged and don't need to be updated.
+ for w in include_files:
+ with col.open(w[len(prefix):], "wb") as f1:
+ with col.open(os.path.join("original", w[len(prefix):]), "wb") as f3:
+ with open(uri_file_path(w), "rb") as f2:
+ dat = f2.read(65536)
+ while dat:
+ f1.write(dat)
+ f3.write(dat)
+ dat = f2.read(65536)
+
+ # Now collect metadata: the collection name and git properties.
+
+ toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+ if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
+ toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
+
+ toolfile = tool.tool["id"][len(prefix):]
+
+ properties = {
+ "type": "workflow",
+ "arv:workflowMain": toolfile,
+ }
+
+ if git_info:
+ for g in git_info:
+ p = g.split("#", 1)[1]
+ properties["arv:"+p] = git_info[g]
+
+ # Check if a collection with the same content already exists in the target project. If so, just use that one.
+ existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", col.portable_data_hash()],
+ ["owner_uuid", "=", arvRunner.project_uuid]]).execute(num_retries=arvRunner.num_retries)
+
+ if len(existing["items"]) == 0:
+ toolname = toolname.replace("/", " ")
+ col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
+ logger.info("Workflow uploaded to %s", col.manifest_locator())
+ else:
+ logger.info("Workflow uploaded to %s", existing["items"][0]["uuid"])
+
+ # Now that we've updated the workflow and saved it to a
+ # collection, we're going to construct a minimal "wrapper"
+ # workflow which consists only of input and output parameters
+ # connected to a single step that runs the real workflow.
+
+ runfile = "keep:%s/%s" % (col.portable_data_hash(), toolfile)
+
+ step = {
+ "id": "#main/" + toolname,
+ "in": [],
+ "out": [],
+ "run": runfile,
+ "label": name
+ }
+
+ main = tool.tool
wf_runner_resources = None
@@ -74,30 +420,113 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid,
wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
hints.append(wf_runner_resources)
- wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
- submit_runner_image or "arvados/jobs:"+__version__,
- runtimeContext)
+ if "acrContainerImage" not in wf_runner_resources:
+ wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
+ submit_runner_image or "arvados/jobs:"+__version__,
+ runtimeContext)
if submit_runner_ram:
wf_runner_resources["ramMin"] = submit_runner_ram
- main["hints"] = hints
+ # Remove a few redundant fields from the "job order" (aka input
+ # object or input parameters). In the situation where we're
+ # creating or updating a workflow record, any values in the job
+ # order get copied over as default values for input parameters.
+ adjustDirObjs(job_order, trim_listing)
+ adjustFileObjs(job_order, trim_anonymous_location)
+ adjustDirObjs(job_order, trim_anonymous_location)
+
+ newinputs = []
+ for i in main["inputs"]:
+ inp = {}
+ # Make sure to only copy known fields that are meaningful at
+ # the workflow level. In practice this ensures that if we're
+ # wrapping a CommandLineTool we don't grab inputBinding.
+ # Right now also excludes extension fields, which is fine,
+ # Arvados doesn't currently look for any extension fields on
+ # input parameters.
+ for f in ("type", "label", "secondaryFiles", "streamable",
+ "doc", "format", "loadContents",
+ "loadListing", "default"):
+ if f in i:
+ inp[f] = i[f]
+
+ if set_defaults:
+ sn = shortname(i["id"])
+ if sn in job_order:
+ inp["default"] = job_order[sn]
+
+ inp["id"] = "#main/%s" % shortname(i["id"])
+ newinputs.append(inp)
+
+ wrapper = {
+ "class": "Workflow",
+ "id": "#main",
+ "inputs": newinputs,
+ "outputs": [],
+ "steps": [step]
+ }
+
+ for i in main["inputs"]:
+ step["in"].append({
+ "id": "#main/step/%s" % shortname(i["id"]),
+ "source": "#main/%s" % shortname(i["id"])
+ })
+
+ for i in main["outputs"]:
+ step["out"].append({"id": "#main/step/%s" % shortname(i["id"])})
+ wrapper["outputs"].append({"outputSource": "#main/step/%s" % shortname(i["id"]),
+ "type": i["type"],
+ "id": "#main/%s" % shortname(i["id"])})
+
+ wrapper["requirements"] = [{"class": "SubworkflowFeatureRequirement"}]
+
+ if main.get("requirements"):
+ wrapper["requirements"].extend(main["requirements"])
+ if hints:
+ wrapper["hints"] = hints
+
+ # Schema definitions (this lets you define things like record
+ # types) require a special handling.
+
+ for i, r in enumerate(wrapper["requirements"]):
+ if r["class"] == "SchemaDefRequirement":
+ wrapper["requirements"][i] = fix_schemadef(r, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())
+
+ update_refs(arvRunner.api, wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, main["id"]+"#", "#main/")
+
+ doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
+
+ if git_info:
+ for g in git_info:
+ doc[g] = git_info[g]
+
+ # Remove any lingering file references.
+ drop_ids(wrapper)
+
+ return doc
+
+
+def make_workflow_record(arvRunner, doc, name, tool, project_uuid, update_uuid):
+
+ wrappertext = json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))
body = {
"workflow": {
"name": name,
"description": tool.tool.get("doc", ""),
- "definition":json.dumps(packed, sort_keys=True, indent=4, separators=(',',': '))
+ "definition": wrappertext
}}
if project_uuid:
body["workflow"]["owner_uuid"] = project_uuid
- if uuid:
- call = arvRunner.api.workflows().update(uuid=uuid, body=body)
+ if update_uuid:
+ call = arvRunner.api.workflows().update(uuid=update_uuid, body=body)
else:
call = arvRunner.api.workflows().create(body=body)
return call.execute(num_retries=arvRunner.num_retries)["uuid"]
+
def dedup_reqs(reqs):
dedup = {}
for r in reversed(reqs):
@@ -146,8 +575,13 @@ class ArvadosWorkflowStep(WorkflowStep):
**argv
): # type: (...) -> None
- super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)
- self.tool["class"] = "WorkflowStep"
+ if arvrunner.fast_submit:
+ self.tool = toolpath_object
+ self.tool["inputs"] = []
+ self.tool["outputs"] = []
+ else:
+ super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)
+ self.tool["class"] = "WorkflowStep"
self.arvrunner = arvrunner
def job(self, joborder, output_callback, runtimeContext):
@@ -169,27 +603,29 @@ class ArvadosWorkflow(Workflow):
self.dynamic_resource_req = []
self.static_resource_req = []
self.wf_reffiles = []
- self.loadingContext = loadingContext
- super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
- self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
+ self.loadingContext = loadingContext.copy()
- def job(self, joborder, output_callback, runtimeContext):
+ self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
+ tool_requirements = toolpath_object.get("requirements", [])
+ self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
+ tool_hints = toolpath_object.get("hints", [])
- builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)
- runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+ workflow_runner_req, _ = self.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+ if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+ self.loadingContext.default_docker_image = workflow_runner_req.get("acrContainerImage")
- req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
- if not req:
- return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
+ super(ArvadosWorkflow, self).__init__(toolpath_object, self.loadingContext)
+ self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
- # RunInSingleContainer is true
+ def runInSingleContainer(self, joborder, output_callback, runtimeContext, builder):
with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
if "id" not in self.tool:
raise WorkflowException("%s object must have 'id'" % (self.tool["class"]))
discover_secondary_files(self.arvrunner.fs_access, builder,
self.tool["inputs"], joborder)
+
normalizeFilesDirs(joborder)
with Perf(metrics, "subworkflow upload_deps"):
@@ -198,7 +634,6 @@ class ArvadosWorkflow(Workflow):
self.doc_loader,
joborder,
joborder.get("id", "#"),
- False,
runtimeContext)
if self.wf_pdh is None:
@@ -242,7 +677,6 @@ class ArvadosWorkflow(Workflow):
self.doc_loader,
packed,
self.tool["id"],
- False,
runtimeContext)
# Discover files/directories referenced by the
@@ -347,6 +781,51 @@ class ArvadosWorkflow(Workflow):
})
return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)
+
+ def separateRunner(self, joborder, output_callback, runtimeContext, req, builder):
+
+ name = runtimeContext.name
+
+ rpn = req.get("runnerProcessName")
+ if rpn:
+ name = builder.do_eval(rpn)
+
+ return RunnerContainer(self.arvrunner,
+ self,
+ self.loadingContext,
+ runtimeContext.enable_reuse,
+ None,
+ None,
+ submit_runner_ram=runtimeContext.submit_runner_ram,
+ name=name,
+ on_error=runtimeContext.on_error,
+ submit_runner_image=runtimeContext.submit_runner_image,
+ intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
+ merged_map=None,
+ priority=runtimeContext.priority,
+ secret_store=self.arvrunner.secret_store,
+ collection_cache_size=runtimeContext.collection_cache_size,
+ collection_cache_is_default=self.arvrunner.should_estimate_cache_size,
+ git_info=runtimeContext.git_info,
+ reuse_runner=True).job(joborder, output_callback, runtimeContext)
+
+
+ def job(self, joborder, output_callback, runtimeContext):
+
+ builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)
+ runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+
+ req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
+ if req:
+ return self.runInSingleContainer(joborder, output_callback, runtimeContext, builder)
+
+ req, _ = self.get_requirement("http://arvados.org/cwl#SeparateRunner")
+ if req:
+ return self.separateRunner(joborder, output_callback, runtimeContext, req, builder)
+
+ return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
+
+
def make_workflow_step(self,
toolpath_object, # type: Dict[Text, Any]
pos, # type: int
diff --git a/sdk/cwl/arvados_cwl/context.py b/sdk/cwl/arvados_cwl/context.py
index 64f85e2076..60ea9bdff5 100644
--- a/sdk/cwl/arvados_cwl/context.py
+++ b/sdk/cwl/arvados_cwl/context.py
@@ -7,6 +7,7 @@ from collections import namedtuple
class ArvLoadingContext(LoadingContext):
def __init__(self, kwargs=None):
+ self.default_docker_image = None
super(ArvLoadingContext, self).__init__(kwargs)
class ArvRuntimeContext(RuntimeContext):
@@ -39,6 +40,14 @@ class ArvRuntimeContext(RuntimeContext):
self.match_local_docker = False
self.enable_preemptible = None
self.copy_deps = None
+ self.defer_downloads = False
+ self.varying_url_params = ""
+ self.prefer_cached_downloads = False
+ self.cached_docker_lookups = {}
+ self.print_keep_deps = False
+ self.git_info = {}
+ self.enable_usage_report = None
+ self.usage_report_notes = []
super(ArvRuntimeContext, self).__init__(kwargs)
diff --git a/sdk/cwl/arvados_cwl/done.py b/sdk/cwl/arvados_cwl/done.py
index e12fe185a0..5c12419765 100644
--- a/sdk/cwl/arvados_cwl/done.py
+++ b/sdk/cwl/arvados_cwl/done.py
@@ -57,43 +57,45 @@ def done_outputs(self, record, tmpdir, outdir, keepdir):
crunchstat_re = re.compile(r"^\d{4}-\d\d-\d\d_\d\d:\d\d:\d\d [a-z0-9]{5}-8i9sb-[a-z0-9]{15} \d+ \d+ stderr crunchstat:")
timestamp_re = re.compile(r"^(\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d+Z) (.*)")
-def logtail(logcollection, logfunc, header, maxlen=25):
+def logtail(logcollection, logfunc, header, maxlen=25, include_crunchrun=True):
if len(logcollection) == 0:
logfunc("%s\n%s", header, " ** log is empty **")
return
- containersapi = ("crunch-run.txt" in logcollection)
mergelogs = {}
+ logfiles = ["stdout.txt", "stderr.txt"]
- for log in list(logcollection):
- if not containersapi or log in ("crunch-run.txt", "stdout.txt", "stderr.txt"):
- logname = log[:-4]
- logt = deque([], maxlen)
- mergelogs[logname] = logt
- with logcollection.open(log, encoding="utf-8") as f:
- for l in f:
- if containersapi:
- g = timestamp_re.match(l)
- logt.append((g.group(1), g.group(2)))
- elif not crunchstat_re.match(l):
- logt.append(l)
-
- if containersapi:
- keys = list(mergelogs)
- loglines = []
- while True:
- earliest = None
- for k in keys:
- if mergelogs[k]:
- if earliest is None or mergelogs[k][0][0] < mergelogs[earliest][0][0]:
- earliest = k
- if earliest is None:
- break
- ts, msg = mergelogs[earliest].popleft()
- loglines.append("%s %s %s" % (ts, earliest, msg))
- loglines = loglines[-maxlen:]
- else:
- loglines = mergelogs[list(mergelogs)[0]]
+ if include_crunchrun:
+ logfiles.append("crunch-run.txt")
+
+ for log in logfiles:
+ if log not in logcollection:
+ continue
+ logname = log[:-4] # trim off the .txt
+ logt = deque([], maxlen)
+ mergelogs[logname] = logt
+ with logcollection.open(log, encoding="utf-8") as f:
+ for l in f:
+ g = timestamp_re.match(l)
+ logt.append((g.group(1), g.group(2)))
+
+ keys = list(mergelogs)
+ loglines = []
+
+    # we assume the log lines are all in order so this is a
+ # straight linear merge where we look at the next timestamp of
+ # each log and take whichever one is earliest.
+ while True:
+ earliest = None
+ for k in keys:
+ if mergelogs[k]:
+ if earliest is None or mergelogs[k][0][0] < mergelogs[earliest][0][0]:
+ earliest = k
+ if earliest is None:
+ break
+ ts, msg = mergelogs[earliest].popleft()
+ loglines.append("%s %s %s" % (ts, earliest, msg))
+ loglines = loglines[-maxlen:]
logtxt = "\n ".join(l.strip() for l in loglines)
- logfunc("%s\n\n %s", header, logtxt)
+ logfunc("%s\n\n %s\n", header, logtxt)
diff --git a/sdk/cwl/arvados_cwl/executor.py b/sdk/cwl/arvados_cwl/executor.py
index 3241fb607c..432b380aab 100644
--- a/sdk/cwl/arvados_cwl/executor.py
+++ b/sdk/cwl/arvados_cwl/executor.py
@@ -17,13 +17,15 @@ import copy
import json
import re
from functools import partial
+import subprocess
import time
import urllib
from cwltool.errors import WorkflowException
import cwltool.workflow
-from schema_salad.sourceline import SourceLine
+from schema_salad.sourceline import SourceLine, cmap
import schema_salad.validate as validate
+from schema_salad.ref_resolver import file_uri, uri_file_path
import arvados
import arvados.config
@@ -32,9 +34,9 @@ from arvados.errors import ApiError
import arvados_cwl.util
from .arvcontainer import RunnerContainer, cleanup_name_for_collection
-from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder, update_from_merged_map, print_keep_deps
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
-from .arvworkflow import ArvadosWorkflow, upload_workflow
+from .arvworkflow import ArvadosWorkflow, upload_workflow, make_workflow_record
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
from .perf import Perf
from .pathmapper import NoFollowPathMapper
@@ -68,6 +70,10 @@ class RuntimeStatusLoggingHandler(logging.Handler):
kind = 'error'
elif record.levelno >= logging.WARNING:
kind = 'warning'
+ if kind == 'warning' and record.name in ("salad", "crunchstat_summary"):
+ # Don't send validation warnings to runtime status,
+ # they're noisy and unhelpful.
+ return
if kind is not None and self.updatingRuntimeStatus is not True:
self.updatingRuntimeStatus = True
try:
@@ -110,6 +116,9 @@ class ArvCwlExecutor(object):
arvargs.output_tags = None
arvargs.thread_count = 1
arvargs.collection_cache_size = None
+ arvargs.git_info = True
+ arvargs.submit = False
+ arvargs.defer_downloads = False
self.api = api_client
self.processes = {}
@@ -135,6 +144,9 @@ class ArvCwlExecutor(object):
self.fs_access = None
self.secret_store = None
self.stdout = stdout
+ self.fast_submit = False
+ self.git_info = arvargs.git_info
+ self.debug = False
if keep_client is not None:
self.keep_client = keep_client
@@ -201,6 +213,8 @@ The 'jobs' API is no longer supported.
self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
collection_cache=self.collection_cache)
+ self.defer_downloads = arvargs.submit and arvargs.defer_downloads
+
validate_cluster_target(self, self.toplevel_runtimeContext)
@@ -252,6 +266,11 @@ The 'jobs' API is no longer supported.
Called when there's a need to report errors, warnings or just
activity statuses, for example in the RuntimeStatusLoggingHandler.
"""
+
+ if kind not in ('error', 'warning', 'activity'):
+ # Ignore any other status kind
+ return
+
with self.workflow_eval_lock:
current = None
try:
@@ -261,32 +280,35 @@ The 'jobs' API is no longer supported.
if current is None:
return
runtime_status = current.get('runtime_status', {})
- if kind in ('error', 'warning'):
- updatemessage = runtime_status.get(kind, "")
- if not updatemessage:
- updatemessage = message
-
- # Subsequent messages tacked on in detail
- updatedetail = runtime_status.get(kind+'Detail', "")
- maxlines = 40
- if updatedetail.count("\n") < maxlines:
- if updatedetail:
- updatedetail += "\n"
- updatedetail += message + "\n"
-
- if detail:
- updatedetail += detail + "\n"
-
- if updatedetail.count("\n") >= maxlines:
- updatedetail += "\nSome messages may have been omitted. Check the full log."
-
- runtime_status.update({
- kind: updatemessage,
- kind+'Detail': updatedetail,
- })
- else:
- # Ignore any other status kind
+
+ original_updatemessage = updatemessage = runtime_status.get(kind, "")
+ if kind == "activity" or not updatemessage:
+ updatemessage = message
+
+ # Subsequent messages tacked on in detail
+ original_updatedetail = updatedetail = runtime_status.get(kind+'Detail', "")
+ maxlines = 40
+ if updatedetail.count("\n") < maxlines:
+ if updatedetail:
+ updatedetail += "\n"
+ updatedetail += message + "\n"
+
+ if detail:
+ updatedetail += detail + "\n"
+
+ if updatedetail.count("\n") >= maxlines:
+ updatedetail += "\nSome messages may have been omitted. Check the full log."
+
+ if updatemessage == original_updatemessage and updatedetail == original_updatedetail:
+ # don't waste time doing an update if nothing changed
+ # (usually because we exceeded the max lines)
return
+
+ runtime_status.update({
+ kind: updatemessage,
+ kind+'Detail': updatedetail,
+ })
+
try:
self.api.containers().update(uuid=current['uuid'],
body={
@@ -347,9 +369,11 @@ The 'jobs' API is no longer supported.
while keys:
page = keys[:pageSize]
try:
- proc_states = table.list(filters=[["uuid", "in", page]]).execute(num_retries=self.num_retries)
- except Exception:
- logger.exception("Error checking states on API server: %s")
+ proc_states = table.list(filters=[["uuid", "in", page]], select=["uuid", "container_uuid", "state", "log_uuid",
+ "output_uuid", "modified_at", "properties",
+ "runtime_constraints"]).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.warning("Temporary error checking states on API server: %s", e)
remain_wait = self.poll_interval
continue
@@ -510,15 +534,86 @@ The 'jobs' API is no longer supported.
for req in job_reqs:
tool.requirements.append(req)
+ @staticmethod
+ def get_git_info(tool):
+ in_a_git_repo = False
+ cwd = None
+ filepath = None
+
+ if tool.tool["id"].startswith("file://"):
+ # check if git is installed
+ try:
+ filepath = uri_file_path(tool.tool["id"])
+ cwd = os.path.dirname(filepath)
+ subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, check=True, capture_output=True, text=True)
+ in_a_git_repo = True
+ except Exception as e:
+ pass
+
+ gitproperties = {}
+
+ if in_a_git_repo:
+ git_commit = subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_date = subprocess.run(["git", "log", "--format=%cD", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_committer = subprocess.run(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_origin = subprocess.run(["git", "remote", "get-url", "origin"], cwd=cwd, capture_output=True, text=True).stdout
+ git_status = subprocess.run(["git", "status", "--untracked-files=no", "--porcelain"], cwd=cwd, capture_output=True, text=True).stdout
+ git_describe = subprocess.run(["git", "describe", "--always", "--tags"], cwd=cwd, capture_output=True, text=True).stdout
+ git_toplevel = subprocess.run(["git", "rev-parse", "--show-toplevel"], cwd=cwd, capture_output=True, text=True).stdout
+ git_path = filepath[len(git_toplevel):]
+
+ gitproperties = {
+ "http://arvados.org/cwl#gitCommit": git_commit.strip(),
+ "http://arvados.org/cwl#gitDate": git_date.strip(),
+ "http://arvados.org/cwl#gitCommitter": git_committer.strip(),
+ "http://arvados.org/cwl#gitBranch": git_branch.strip(),
+ "http://arvados.org/cwl#gitOrigin": git_origin.strip(),
+ "http://arvados.org/cwl#gitStatus": git_status.strip(),
+ "http://arvados.org/cwl#gitDescribe": git_describe.strip(),
+ "http://arvados.org/cwl#gitPath": git_path.strip(),
+ }
+ else:
+ for g in ("http://arvados.org/cwl#gitCommit",
+ "http://arvados.org/cwl#gitDate",
+ "http://arvados.org/cwl#gitCommitter",
+ "http://arvados.org/cwl#gitBranch",
+ "http://arvados.org/cwl#gitOrigin",
+ "http://arvados.org/cwl#gitStatus",
+ "http://arvados.org/cwl#gitDescribe",
+ "http://arvados.org/cwl#gitPath"):
+ if g in tool.metadata:
+ gitproperties[g] = tool.metadata[g]
+
+ return gitproperties
+
+ def set_container_request_properties(self, container, properties):
+ resp = self.api.container_requests().list(filters=[["container_uuid", "=", container["uuid"]]], select=["uuid", "properties"]).execute(num_retries=self.num_retries)
+ for cr in resp["items"]:
+ cr["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in properties.items()})
+ self.api.container_requests().update(uuid=cr["uuid"], body={"container_request": {"properties": cr["properties"]}}).execute(num_retries=self.num_retries)
+
def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
+ self.runtime_status_update("activity", "initialization")
+
+ git_info = self.get_git_info(updated_tool) if self.git_info else {}
+ if git_info:
+ logger.info("Git provenance")
+ for g in git_info:
+ if git_info[g]:
+ logger.info(" %s: %s", g.split("#", 1)[1], git_info[g])
+
+ runtimeContext.git_info = git_info
+
workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
logger.info("Using cluster %s (%s)", self.api.config()["ClusterID"], workbench2 or workbench1 or controller)
- updated_tool.visit(self.check_features)
+ if not self.fast_submit:
+ updated_tool.visit(self.check_features)
self.pipeline = None
self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
@@ -546,7 +641,10 @@ The 'jobs' API is no longer supported.
runtimeContext.intermediate_storage_classes = default_storage_classes
if not runtimeContext.name:
- runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+ self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+ if git_info.get("http://arvados.org/cwl#gitDescribe"):
+ self.name = "%s (%s)" % (self.name, git_info.get("http://arvados.org/cwl#gitDescribe"))
+ runtimeContext.name = self.name
if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
# When creating or updating workflow record, by default
@@ -555,6 +653,10 @@ The 'jobs' API is no longer supported.
runtimeContext.copy_deps = True
runtimeContext.match_local_docker = True
+ if runtimeContext.print_keep_deps:
+ runtimeContext.copy_deps = False
+ runtimeContext.match_local_docker = False
+
if runtimeContext.update_workflow and self.project_uuid is None:
# If we are updating a workflow, make sure anything that
# gets uploaded goes into the same parent project, unless
@@ -564,66 +666,95 @@ The 'jobs' API is no longer supported.
self.project_uuid = runtimeContext.project_uuid
+ self.runtime_status_update("activity", "data transfer")
+
# Upload local file references in the job order.
with Perf(metrics, "upload_job_order"):
- job_order = upload_job_order(self, "%s input" % runtimeContext.name,
+ job_order, jobmapper = upload_job_order(self, "%s input" % runtimeContext.name,
updated_tool, job_order, runtimeContext)
+ # determine if we are submitting or directly executing the workflow.
+ #
# the last clause means: if it is a command line tool, and we
# are going to wait for the result, and always_submit_runner
# is false, then we don't submit a runner process.
- submitting = (runtimeContext.update_workflow or
- runtimeContext.create_workflow or
- (runtimeContext.submit and not
+ submitting = (runtimeContext.submit and not
(updated_tool.tool["class"] == "CommandLineTool" and
runtimeContext.wait and
- not runtimeContext.always_submit_runner)))
+ not runtimeContext.always_submit_runner))
loadingContext = self.loadingContext.copy()
loadingContext.do_validate = False
loadingContext.disable_js_validation = True
- if submitting:
- loadingContext.do_update = False
- # Document may have been auto-updated. Reload the original
- # document with updating disabled because we want to
- # submit the document with its original CWL version, not
- # the auto-updated one.
- with Perf(metrics, "load_tool original"):
- tool = load_tool(updated_tool.tool["id"], loadingContext)
- else:
- tool = updated_tool
+ tool = updated_tool
# Upload direct dependencies of workflow steps, get back mapping of files to keep references.
# Also uploads docker images.
- logger.info("Uploading workflow dependencies")
- with Perf(metrics, "upload_workflow_deps"):
- merged_map = upload_workflow_deps(self, tool, runtimeContext)
-
- # Recreate process object (ArvadosWorkflow or
- # ArvadosCommandTool) because tool document may have been
- # updated by upload_workflow_deps in ways that modify
- # inheritance of hints or requirements.
+ if not self.fast_submit:
+ logger.info("Uploading workflow dependencies")
+ with Perf(metrics, "upload_workflow_deps"):
+ merged_map = upload_workflow_deps(self, tool, runtimeContext)
+ else:
+ # in the fast submit case, we are running a workflow that
+ # has already been uploaded to Arvados, so we assume all
+ # the dependencies have been pinned to keep references and
+ # there is nothing to do.
+ merged_map = {}
+
loadingContext.loader = tool.doc_loader
loadingContext.avsc_names = tool.doc_schema
loadingContext.metadata = tool.metadata
- with Perf(metrics, "load_tool"):
- tool = load_tool(tool.tool, loadingContext)
-
- if runtimeContext.update_workflow or runtimeContext.create_workflow:
- # Create a pipeline template or workflow record and exit.
- if self.work_api == "containers":
- uuid = upload_workflow(self, tool, job_order,
- runtimeContext.project_uuid,
- runtimeContext,
- uuid=runtimeContext.update_workflow,
- submit_runner_ram=runtimeContext.submit_runner_ram,
- name=runtimeContext.name,
- merged_map=merged_map,
- submit_runner_image=runtimeContext.submit_runner_image)
+ loadingContext.skip_resolve_all = True
+
+ workflow_wrapper = None
+ if (submitting and not self.fast_submit) or runtimeContext.update_workflow or runtimeContext.create_workflow or runtimeContext.print_keep_deps:
+ # upload workflow and get back the workflow wrapper
+
+ workflow_wrapper = upload_workflow(self, tool, job_order,
+ runtimeContext.project_uuid,
+ runtimeContext,
+ uuid=runtimeContext.update_workflow,
+ submit_runner_ram=runtimeContext.submit_runner_ram,
+ name=runtimeContext.name,
+ merged_map=merged_map,
+ submit_runner_image=runtimeContext.submit_runner_image,
+ git_info=git_info,
+ set_defaults=(runtimeContext.update_workflow or runtimeContext.create_workflow),
+ jobmapper=jobmapper)
+
+ if runtimeContext.update_workflow or runtimeContext.create_workflow:
+ # We're registering the workflow, so create or update
+ # the workflow record and then exit.
+ uuid = make_workflow_record(self, workflow_wrapper, runtimeContext.name, tool,
+ runtimeContext.project_uuid, runtimeContext.update_workflow)
self.stdout.write(uuid + "\n")
return (None, "success")
+ if runtimeContext.print_keep_deps:
+ # Just find and print out all the collection dependencies and exit
+ print_keep_deps(self, runtimeContext, merged_map, tool)
+ return (None, "success")
+
+ # Did not register a workflow, we're going to submit
+ # it instead.
+ loadingContext.loader.idx.clear()
+ loadingContext.loader.idx["_:main"] = workflow_wrapper
+ workflow_wrapper["id"] = "_:main"
+
+ # Reload the minimal wrapper workflow.
+ self.fast_submit = True
+ tool = load_tool(workflow_wrapper, loadingContext)
+ loadingContext.loader.idx["_:main"] = workflow_wrapper
+
+ if not submitting:
+ # If we are going to run the workflow now (rather than
+ # submit it), we need to update the workflow document
+ # replacing file references with keep references. If we
+ # are just going to construct a run submission, we don't
+ # need to do this.
+ update_from_merged_map(tool, merged_map)
+
self.apply_reqs(job_order, tool)
self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
@@ -666,11 +797,14 @@ The 'jobs' API is no longer supported.
runnerjob = None
if runtimeContext.submit:
- # Submit a runner job to run the workflow for us.
+ # We are submitting instead of running immediately.
+ #
+ # Create a "Runner job" that when run() is invoked,
+ # creates the container request to run the workflow.
if self.work_api == "containers":
if submitting:
- tool = RunnerContainer(self, updated_tool,
- tool, loadingContext, runtimeContext.enable_reuse,
+ loadingContext.metadata = updated_tool.metadata.copy()
+ tool = RunnerContainer(self, tool, loadingContext, runtimeContext.enable_reuse,
self.output_name,
self.output_tags,
submit_runner_ram=runtimeContext.submit_runner_ram,
@@ -682,7 +816,8 @@ The 'jobs' API is no longer supported.
priority=runtimeContext.priority,
secret_store=self.secret_store,
collection_cache_size=runtimeContext.collection_cache_size,
- collection_cache_is_default=self.should_estimate_cache_size)
+ collection_cache_is_default=self.should_estimate_cache_size,
+ git_info=git_info)
else:
runtimeContext.runnerjob = tool.tool["id"]
@@ -694,14 +829,22 @@ The 'jobs' API is no longer supported.
runtimeContext)
if runtimeContext.submit and not runtimeContext.wait:
+ # User provided --no-wait so submit the container request,
+ # get the container request uuid, print it out, and exit.
runnerjob = next(jobiter)
runnerjob.run(runtimeContext)
self.stdout.write(runnerjob.uuid+"\n")
return (None, "success")
+        # We are either running the workflow directly, or submitting it
+ # and will wait for a final result.
+
+ self.runtime_status_update("activity", "workflow execution")
+
current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current_container:
logger.info("Running inside container %s", current_container.get("uuid"))
+ self.set_container_request_properties(current_container, git_info)
self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
self.polling_thread = threading.Thread(target=self.poll_states)
@@ -735,7 +878,8 @@ The 'jobs' API is no longer supported.
if (self.task_queue.in_flight + len(self.processes)) > 0:
self.workflow_eval_lock.wait(3)
else:
- logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
+ if self.final_status is None:
+ logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
break
if self.stop_polling.is_set():
@@ -785,6 +929,11 @@ The 'jobs' API is no longer supported.
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
+ if runtimeContext.usage_report_notes:
+ logger.info("Steps with low resource utilization (possible optimization opportunities):")
+ for x in runtimeContext.usage_report_notes:
+ logger.info(" %s", x)
+
if runtimeContext.submit and isinstance(tool, Runner):
logger.info("Final output collection %s", tool.final_output)
if workbench2 or workbench1:
diff --git a/sdk/cwl/arvados_cwl/fsaccess.py b/sdk/cwl/arvados_cwl/fsaccess.py
index 4da8f85569..a5e9db0cfe 100644
--- a/sdk/cwl/arvados_cwl/fsaccess.py
+++ b/sdk/cwl/arvados_cwl/fsaccess.py
@@ -16,8 +16,9 @@ import re
import logging
import threading
from collections import OrderedDict
+from io import StringIO
-import ruamel.yaml as yaml
+import ruamel.yaml
import cwltool.stdfsaccess
from cwltool.pathmapper import abspath
@@ -235,19 +236,23 @@ class CollectionFetcher(DefaultFetcher):
return f.read()
if url.startswith("arvwf:"):
record = self.api_client.workflows().get(uuid=url[6:]).execute(num_retries=self.num_retries)
- definition = yaml.round_trip_load(record["definition"])
+ yaml = ruamel.yaml.YAML(typ='rt', pure=True)
+ definition = yaml.load(record["definition"])
definition["label"] = record["name"]
- return yaml.round_trip_dump(definition)
+ stream = StringIO()
+ yaml.dump(definition, stream)
+ return stream.getvalue()
return super(CollectionFetcher, self).fetch_text(url)
def check_exists(self, url):
try:
if url.startswith("http://arvados.org/cwl"):
return True
- if url.startswith("keep:"):
- return self.fsaccess.exists(url)
- if url.startswith("arvwf:"):
- if self.fetch_text(url):
+ urld, _ = urllib.parse.urldefrag(url)
+ if urld.startswith("keep:"):
+ return self.fsaccess.exists(urld)
+ if urld.startswith("arvwf:"):
+ if self.fetch_text(urld):
return True
except arvados.errors.NotFoundError:
return False
diff --git a/sdk/cwl/arvados_cwl/http.py b/sdk/cwl/arvados_cwl/http.py
deleted file mode 100644
index dcc2a51192..0000000000
--- a/sdk/cwl/arvados_cwl/http.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-
-import requests
-import email.utils
-import time
-import datetime
-import re
-import arvados
-import arvados.collection
-import urllib.parse
-import logging
-import calendar
-import urllib.parse
-
-logger = logging.getLogger('arvados.cwl-runner')
-
-def my_formatdate(dt):
- return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
- localtime=False, usegmt=True)
-
-def my_parsedate(text):
- parsed = email.utils.parsedate_tz(text)
- if parsed:
- if parsed[9]:
- # Adjust to UTC
- return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
- else:
- # TZ is zero or missing, assume UTC.
- return datetime.datetime(*parsed[:6])
- else:
- return datetime.datetime(1970, 1, 1)
-
-def fresh_cache(url, properties, now):
- pr = properties[url]
- expires = None
-
- logger.debug("Checking cache freshness for %s using %s", url, pr)
-
- if "Cache-Control" in pr:
- if re.match(r"immutable", pr["Cache-Control"]):
- return True
-
- g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
- if g:
- expires = my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
-
- if expires is None and "Expires" in pr:
- expires = my_parsedate(pr["Expires"])
-
- if expires is None:
- # Use a default cache time of 24 hours if upstream didn't set
- # any cache headers, to reduce redundant downloads.
- expires = my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
-
- if not expires:
- return False
-
- return (now < expires)
-
-def remember_headers(url, properties, headers, now):
- properties.setdefault(url, {})
- for h in ("Cache-Control", "ETag", "Expires", "Date", "Content-Length"):
- if h in headers:
- properties[url][h] = headers[h]
- if "Date" not in headers:
- properties[url]["Date"] = my_formatdate(now)
-
-
-def changed(url, properties, now):
- req = requests.head(url, allow_redirects=True)
- remember_headers(url, properties, req.headers, now)
-
- if req.status_code != 200:
- raise Exception("Got status %s" % req.status_code)
-
- pr = properties[url]
- if "ETag" in pr and "ETag" in req.headers:
- if pr["ETag"] == req.headers["ETag"]:
- return False
-
- return True
-
-def http_to_keep(api, project_uuid, url, utcnow=datetime.datetime.utcnow):
- r = api.collections().list(filters=[["properties", "exists", url]]).execute()
-
- now = utcnow()
-
- for item in r["items"]:
- properties = item["properties"]
- if fresh_cache(url, properties, now):
- # Do nothing
- cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
- return "keep:%s/%s" % (item["portable_data_hash"], list(cr.keys())[0])
-
- if not changed(url, properties, now):
- # ETag didn't change, same content, just update headers
- api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
- cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
- return "keep:%s/%s" % (item["portable_data_hash"], list(cr.keys())[0])
-
- properties = {}
- req = requests.get(url, stream=True, allow_redirects=True)
-
- if req.status_code != 200:
- raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
-
- remember_headers(url, properties, req.headers, now)
-
- if "Content-Length" in properties[url]:
- cl = int(properties[url]["Content-Length"])
- logger.info("Downloading %s (%s bytes)", url, cl)
- else:
- cl = None
- logger.info("Downloading %s (unknown size)", url)
-
- c = arvados.collection.Collection()
-
- if req.headers.get("Content-Disposition"):
- grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))', req.headers["Content-Disposition"])
- if grp.group(2):
- name = grp.group(2)
- else:
- name = grp.group(4)
- else:
- name = urllib.parse.urlparse(url).path.split("/")[-1]
-
- count = 0
- start = time.time()
- checkpoint = start
- with c.open(name, "wb") as f:
- for chunk in req.iter_content(chunk_size=1024):
- count += len(chunk)
- f.write(chunk)
- loopnow = time.time()
- if (loopnow - checkpoint) > 20:
- bps = count / (loopnow - start)
- if cl is not None:
- logger.info("%2.1f%% complete, %3.2f MiB/s, %1.0f seconds left",
- ((count * 100) / cl),
- (bps // (1024*1024)),
- ((cl-count) // bps))
- else:
- logger.info("%d downloaded, %3.2f MiB/s", count, (bps / (1024*1024)))
- checkpoint = loopnow
-
-
- collectionname = "Downloaded from %s" % urllib.parse.quote(url, safe='')
- c.save_new(name=collectionname, owner_uuid=project_uuid, ensure_unique_name=True)
-
- api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
-
- return "keep:%s/%s" % (c.portable_data_hash(), name)
diff --git a/sdk/cwl/arvados_cwl/pathmapper.py b/sdk/cwl/arvados_cwl/pathmapper.py
index 64fdfa0d04..448facf776 100644
--- a/sdk/cwl/arvados_cwl/pathmapper.py
+++ b/sdk/cwl/arvados_cwl/pathmapper.py
@@ -26,7 +26,7 @@ from cwltool.utils import adjustFileObjs, adjustDirObjs
from cwltool.stdfsaccess import abspath
from cwltool.workflow import WorkflowException
-from .http import http_to_keep
+from arvados.http_to_keep import http_to_keep
logger = logging.getLogger('arvados.cwl-runner')
@@ -105,11 +105,18 @@ class ArvPathMapper(PathMapper):
raise WorkflowException("Directory literal '%s' is missing `listing`" % src)
elif src.startswith("http:") or src.startswith("https:"):
try:
- keepref = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src)
- logger.info("%s is %s", src, keepref)
- self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
+ if self.arvrunner.defer_downloads:
+ # passthrough, we'll download it later.
+ self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
+ else:
+ results = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src,
+ varying_url_params=self.arvrunner.toplevel_runtimeContext.varying_url_params,
+ prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads)
+ keepref = "keep:%s/%s" % (results[0], results[1])
+ logger.info("%s is %s", src, keepref)
+ self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
except Exception as e:
- logger.warning(str(e))
+ logger.warning("Download error: %s", e)
else:
self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
@@ -141,7 +148,7 @@ class ArvPathMapper(PathMapper):
for opt in self.optional_deps:
if obj["location"] == opt["location"]:
return
- raise SourceLine(obj, "location", WorkflowException).makeError("Don't know what to do with '%s'" % obj["location"])
+ raise SourceLine(obj, "location", WorkflowException).makeError("Can't handle '%s'" % obj["location"])
def needs_new_collection(self, srcobj, prefix=""):
"""Check if files need to be staged into a new collection.
@@ -156,6 +163,9 @@ class ArvPathMapper(PathMapper):
if loc.startswith("_:"):
return True
+ if self.arvrunner.defer_downloads and (loc.startswith("http:") or loc.startswith("https:")):
+ return False
+
i = loc.rfind("/")
if i > -1:
loc_prefix = loc[:i+1]
diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
index 225f4ae60e..437aa39eb8 100644
--- a/sdk/cwl/arvados_cwl/runner.py
+++ b/sdk/cwl/arvados_cwl/runner.py
@@ -42,10 +42,7 @@ from cwltool.utils import (
CWLOutputType,
)
-if os.name == "posix" and sys.version_info[0] < 3:
- import subprocess32 as subprocess
-else:
- import subprocess
+import subprocess
from schema_salad.sourceline import SourceLine, cmap
@@ -53,13 +50,14 @@ from cwltool.command_line_tool import CommandLineTool
import cwltool.workflow
from cwltool.process import (scandeps, UnsupportedRequirement, normalizeFilesDirs,
shortname, Process, fill_in_defaults)
-from cwltool.load_tool import fetch_document
+from cwltool.load_tool import fetch_document, jobloaderctx
from cwltool.utils import aslist, adjustFileObjs, adjustDirObjs, visit_class
from cwltool.builder import substitute
from cwltool.pack import pack
from cwltool.update import INTERNAL_VERSION
from cwltool.builder import Builder
import schema_salad.validate as validate
+import schema_salad.ref_resolver
import arvados.collection
import arvados.util
@@ -68,7 +66,7 @@ from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq
import arvados_cwl.arvdocker
-from .pathmapper import ArvPathMapper, trim_listing, collection_pdh_pattern, collection_uuid_pattern
+from .pathmapper import ArvPathMapper, trim_listing, collection_pdh_pattern, collection_uuid_pattern, MapperEnt
from ._version import __version__
from . import done
from . context import ArvRuntimeContext
@@ -294,7 +292,7 @@ def discover_secondary_files(fsaccess, builder, inputs, job_order, discovered=No
set_secondary(fsaccess, builder, inputschema, None, primary, discovered)
def upload_dependencies(arvrunner, name, document_loader,
- workflowobj, uri, loadref_run, runtimeContext,
+ workflowobj, uri, runtimeContext,
include_primary=True, discovered_secondaryfiles=None,
cache=None):
"""Upload the dependencies of the workflowobj document to Keep.
@@ -302,64 +300,27 @@ def upload_dependencies(arvrunner, name, document_loader,
Returns a pathmapper object mapping local paths to keep references. Also
does an in-place update of references in "workflowobj".
- Use scandeps to find $import, $include, $schemas, run, File and Directory
+ Use scandeps to find $schemas, File and Directory
fields that represent external references.
If workflowobj has an "id" field, this will reload the document to ensure
it is scanning the raw document prior to preprocessing.
"""
- loaded = set()
- def loadref(b, u):
- joined = document_loader.fetcher.urljoin(b, u)
- defrg, _ = urllib.parse.urldefrag(joined)
- if defrg not in loaded:
- loaded.add(defrg)
- if cache is not None and defrg in cache:
- return cache[defrg]
- # Use fetch_text to get raw file (before preprocessing).
- text = document_loader.fetch_text(defrg)
- if isinstance(text, bytes):
- textIO = StringIO(text.decode('utf-8'))
- else:
- textIO = StringIO(text)
- yamlloader = YAML(typ='safe', pure=True)
- result = yamlloader.load(textIO)
- if cache is not None:
- cache[defrg] = result
- return result
- else:
- return {}
-
- if loadref_run:
- loadref_fields = set(("$import", "run"))
- else:
- loadref_fields = set(("$import",))
-
scanobj = workflowobj
- if "id" in workflowobj and not workflowobj["id"].startswith("_:"):
- defrg, _ = urllib.parse.urldefrag(workflowobj["id"])
- if cache is not None and defrg not in cache:
- # if we haven't seen this file before, want raw file
- # content (before preprocessing) to ensure that external
- # references like $include haven't already been inlined.
- scanobj = loadref("", workflowobj["id"])
-
metadata = scanobj
- with Perf(metrics, "scandeps include, location"):
+ with Perf(metrics, "scandeps"):
sc_result = scandeps(uri, scanobj,
- loadref_fields,
- set(("$include", "location")),
- loadref, urljoin=document_loader.fetcher.urljoin,
+ set(),
+ set(("location",)),
+ None, urljoin=document_loader.fetcher.urljoin,
nestdirs=False)
-
- with Perf(metrics, "scandeps $schemas"):
optional_deps = scandeps(uri, scanobj,
- loadref_fields,
- set(("$schemas",)),
- loadref, urljoin=document_loader.fetcher.urljoin,
- nestdirs=False)
+ set(),
+ set(("$schemas",)),
+ None, urljoin=document_loader.fetcher.urljoin,
+ nestdirs=False)
if sc_result is None:
sc_result = []
@@ -483,15 +444,18 @@ def upload_dependencies(arvrunner, name, document_loader,
single_collection=True,
optional_deps=optional_deps)
+ for k, v in uuid_map.items():
+ mapper._pathmap["keep:"+k] = MapperEnt(v, "", "", False)
+
keeprefs = set()
def addkeepref(k):
if k.startswith("keep:"):
keeprefs.add(collection_pdh_pattern.match(k).group(1))
- def setloc(p):
+
+ def collectloc(p):
loc = p.get("location")
if loc and (not loc.startswith("_:")) and (not loc.startswith("keep:")):
- p["location"] = mapper.mapper(p["location"]).resolved
addkeepref(p["location"])
return
@@ -522,12 +486,10 @@ def upload_dependencies(arvrunner, name, document_loader,
if uuid not in uuid_map:
raise SourceLine(p, "location", validate.ValidationException).makeError(
"Collection uuid %s not found" % uuid)
- p["location"] = "keep:%s%s" % (uuid_map[uuid], gp.groups()[1] if gp.groups()[1] else "")
- p[collectionUUID] = uuid
- with Perf(metrics, "setloc"):
- visit_class(workflowobj, ("File", "Directory"), setloc)
- visit_class(discovered, ("File", "Directory"), setloc)
+ with Perf(metrics, "collectloc"):
+ visit_class(workflowobj, ("File", "Directory"), collectloc)
+ visit_class(discovered, ("File", "Directory"), collectloc)
if discovered_secondaryfiles is not None:
for d in discovered:
@@ -551,6 +513,7 @@ def upload_dependencies(arvrunner, name, document_loader,
logger.warning("Cannot find collection with portable data hash %s", kr)
continue
col = col["items"][0]
+ col["name"] = arvados.util.trim_name(col["name"])
try:
arvrunner.api.collections().create(body={"collection": {
"owner_uuid": runtimeContext.project_uuid,
@@ -563,7 +526,7 @@ def upload_dependencies(arvrunner, name, document_loader,
"trash_at": col["trash_at"]
}}, ensure_unique_name=True).execute()
except Exception as e:
- logger.warning("Unable copy collection to destination: %s", e)
+ logger.warning("Unable to copy collection to destination: %s", e)
if "$schemas" in workflowobj:
sch = CommentedSeq()
@@ -585,26 +548,16 @@ def upload_docker(arvrunner, tool, runtimeContext):
raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
"Option 'dockerOutputDirectory' of DockerRequirement not supported.")
- arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, docker_req, True,
- runtimeContext.project_uuid,
- runtimeContext.force_docker_pull,
- runtimeContext.tmp_outdir_prefix,
- runtimeContext.match_local_docker,
- runtimeContext.copy_deps)
+ arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, docker_req, True, runtimeContext)
else:
arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {"dockerPull": "arvados/jobs:"+__version__},
- True,
- runtimeContext.project_uuid,
- runtimeContext.force_docker_pull,
- runtimeContext.tmp_outdir_prefix,
- runtimeContext.match_local_docker,
- runtimeContext.copy_deps)
+ True, runtimeContext)
elif isinstance(tool, cwltool.workflow.Workflow):
for s in tool.steps:
upload_docker(arvrunner, s.embedded_tool, runtimeContext)
-def packed_workflow(arvrunner, tool, merged_map, runtimeContext):
+def packed_workflow(arvrunner, tool, merged_map, runtimeContext, git_info):
"""Create a packed workflow.
A "packed" workflow is one where all the components have been combined into a single document."""
@@ -633,17 +586,18 @@ def packed_workflow(arvrunner, tool, merged_map, runtimeContext):
v["secondaryFiles"] = merged_map[cur_id].secondaryFiles[v["location"]]
if v.get("class") == "DockerRequirement":
v["http://arvados.org/cwl#dockerCollectionPDH"] = arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, v, True,
- runtimeContext.project_uuid,
- runtimeContext.force_docker_pull,
- runtimeContext.tmp_outdir_prefix,
- runtimeContext.match_local_docker,
- runtimeContext.copy_deps)
+ runtimeContext)
for l in v:
visit(v[l], cur_id)
if isinstance(v, list):
for l in v:
visit(l, cur_id)
visit(packed, None)
+
+ if git_info:
+ for g in git_info:
+ packed[g] = git_info[g]
+
return packed
@@ -657,6 +611,73 @@ def tag_git_version(packed):
else:
packed["http://schema.org/version"] = githash
+def setloc(mapper, p):
+ loc = p.get("location")
+ if loc and (not loc.startswith("_:")) and (not loc.startswith("keep:")):
+ p["location"] = mapper.mapper(p["location"]).resolved
+ return
+
+ if not loc:
+ return
+
+ if collectionUUID in p:
+ uuid = p[collectionUUID]
+ keepuuid = "keep:"+uuid
+ if keepuuid not in mapper:
+ raise SourceLine(p, collectionUUID, validate.ValidationException).makeError(
+ "Collection uuid %s not found" % uuid)
+ gp = collection_pdh_pattern.match(loc)
+ if gp and mapper.mapper(keepuuid).resolved != gp.groups()[0]:
+ # This file entry has both collectionUUID and a PDH
+ # location. If the PDH doesn't match the one returned
+ # the API server, raise an error.
+ raise SourceLine(p, "location", validate.ValidationException).makeError(
+ "Expected collection uuid %s to be %s but API server reported %s" % (
+ uuid, gp.groups()[0], mapper.mapper(keepuuid).resolved))
+
+ gp = collection_uuid_pattern.match(loc)
+ if not gp:
+ # Not a uuid pattern (must be a pdh pattern)
+ return
+
+ uuid = gp.groups()[0]
+ keepuuid = "keep:"+uuid
+ if keepuuid not in mapper:
+ raise SourceLine(p, "location", validate.ValidationException).makeError(
+ "Collection uuid %s not found" % uuid)
+ p["location"] = "keep:%s%s" % (mapper.mapper(keepuuid).resolved, gp.groups()[1] if gp.groups()[1] else "")
+ p[collectionUUID] = uuid
+
+def update_from_mapper(workflowobj, mapper):
+ with Perf(metrics, "setloc"):
+ visit_class(workflowobj, ("File", "Directory"), partial(setloc, mapper))
+
+def apply_merged_map(merged_map, workflowobj):
+ def visit(v, cur_id):
+ if isinstance(v, dict):
+ if v.get("class") in ("CommandLineTool", "Workflow", "ExpressionTool"):
+ if "id" in v:
+ cur_id = v["id"]
+ if "path" in v and "location" not in v:
+ v["location"] = v["path"]
+ del v["path"]
+ if "location" in v and cur_id in merged_map:
+ if v["location"] in merged_map[cur_id].resolved:
+ v["location"] = merged_map[cur_id].resolved[v["location"]]
+ if v["location"] in merged_map[cur_id].secondaryFiles:
+ v["secondaryFiles"] = merged_map[cur_id].secondaryFiles[v["location"]]
+ #if v.get("class") == "DockerRequirement":
+ # v["http://arvados.org/cwl#dockerCollectionPDH"] = arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, v, True,
+ # runtimeContext)
+ for l in v:
+ visit(v[l], cur_id)
+ if isinstance(v, list):
+ for l in v:
+ visit(l, cur_id)
+ visit(workflowobj, None)
+
+def update_from_merged_map(tool, merged_map):
+ tool.visit(partial(apply_merged_map, merged_map))
def upload_job_order(arvrunner, name, tool, job_order, runtimeContext):
"""Upload local files referenced in the input object and return updated input
@@ -689,12 +710,14 @@ def upload_job_order(arvrunner, name, tool, job_order, runtimeContext):
tool.tool["inputs"],
job_order)
+ _jobloaderctx = jobloaderctx.copy()
+ jobloader = schema_salad.ref_resolver.Loader(_jobloaderctx, fetcher_constructor=tool.doc_loader.fetcher_constructor)
+
jobmapper = upload_dependencies(arvrunner,
name,
- tool.doc_loader,
+ jobloader,
job_order,
job_order.get("id", "#"),
- False,
runtimeContext)
if "id" in job_order:
@@ -705,7 +728,9 @@ def upload_job_order(arvrunner, name, tool, job_order, runtimeContext):
if "job_order" in job_order:
del job_order["job_order"]
- return job_order
+ update_from_mapper(job_order, jobmapper)
+
+ return job_order, jobmapper
FileUpdates = namedtuple("FileUpdates", ["resolved", "secondaryFiles"])
@@ -719,28 +744,38 @@ def upload_workflow_deps(arvrunner, tool, runtimeContext):
merged_map = {}
tool_dep_cache = {}
+
+ todo = []
+
+ # Standard traversal is top down, we want to go bottom up, so use
+    # the visitor to accumulate a list of nodes to visit, then
+ # visit them in reverse order.
def upload_tool_deps(deptool):
if "id" in deptool:
- discovered_secondaryfiles = {}
- with Perf(metrics, "upload_dependencies %s" % shortname(deptool["id"])):
- pm = upload_dependencies(arvrunner,
- "%s dependencies" % (shortname(deptool["id"])),
- document_loader,
- deptool,
- deptool["id"],
- False,
- runtimeContext,
- include_primary=False,
- discovered_secondaryfiles=discovered_secondaryfiles,
- cache=tool_dep_cache)
- document_loader.idx[deptool["id"]] = deptool
- toolmap = {}
- for k,v in pm.items():
- toolmap[k] = v.resolved
- merged_map[deptool["id"]] = FileUpdates(toolmap, discovered_secondaryfiles)
+ todo.append(deptool)
tool.visit(upload_tool_deps)
+ for deptool in reversed(todo):
+ discovered_secondaryfiles = {}
+ with Perf(metrics, "upload_dependencies %s" % shortname(deptool["id"])):
+ pm = upload_dependencies(arvrunner,
+ "%s dependencies" % (shortname(deptool["id"])),
+ document_loader,
+ deptool,
+ deptool["id"],
+ runtimeContext,
+ include_primary=False,
+ discovered_secondaryfiles=discovered_secondaryfiles,
+ cache=tool_dep_cache)
+
+ document_loader.idx[deptool["id"]] = deptool
+ toolmap = {}
+ for k,v in pm.items():
+ toolmap[k] = v.resolved
+
+ merged_map[deptool["id"]] = FileUpdates(toolmap, discovered_secondaryfiles)
+
return merged_map
def arvados_jobs_image(arvrunner, img, runtimeContext):
@@ -748,12 +783,7 @@ def arvados_jobs_image(arvrunner, img, runtimeContext):
try:
return arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {"dockerPull": img},
- True,
- runtimeContext.project_uuid,
- runtimeContext.force_docker_pull,
- runtimeContext.tmp_outdir_prefix,
- runtimeContext.match_local_docker,
- runtimeContext.copy_deps)
+ True, runtimeContext)
except Exception as e:
raise Exception("Docker image %s is not available\n%s" % (img, e) )
@@ -787,19 +817,20 @@ class Runner(Process):
"""Base class for runner processes, which submit an instance of
arvados-cwl-runner and wait for the final result."""
- def __init__(self, runner, updated_tool,
+ def __init__(self, runner,
tool, loadingContext, enable_reuse,
output_name, output_tags, submit_runner_ram=0,
name=None, on_error=None, submit_runner_image=None,
intermediate_output_ttl=0, merged_map=None,
priority=None, secret_store=None,
collection_cache_size=256,
- collection_cache_is_default=True):
+ collection_cache_is_default=True,
+ git_info=None,
+ reuse_runner=False):
- loadingContext = loadingContext.copy()
- loadingContext.metadata = updated_tool.metadata.copy()
+ self.loadingContext = loadingContext.copy()
- super(Runner, self).__init__(updated_tool.tool, loadingContext)
+ super(Runner, self).__init__(tool.tool, loadingContext)
self.arvrunner = runner
self.embedded_tool = tool
@@ -811,6 +842,9 @@ class Runner(Process):
reuse_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#ReuseRequirement")
if reuse_req:
enable_reuse = reuse_req["enableReuse"]
+ reuse_req, _ = self.embedded_tool.get_requirement("WorkReuse")
+ if reuse_req:
+ enable_reuse = reuse_req["enableReuse"]
self.enable_reuse = enable_reuse
self.uuid = None
self.final_output = None
@@ -822,7 +856,10 @@ class Runner(Process):
self.intermediate_output_ttl = intermediate_output_ttl
self.priority = priority
self.secret_store = secret_store
- self.enable_dev = loadingContext.enable_dev
+ self.enable_dev = self.loadingContext.enable_dev
+ self.git_info = git_info
+ self.fast_parser = self.loadingContext.fast_parser
+ self.reuse_runner = reuse_runner
self.submit_runner_cores = 1
self.submit_runner_ram = 1024 # defaut 1 GiB
@@ -885,7 +922,8 @@ class Runner(Process):
api_client=self.arvrunner.api,
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
- done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
+ done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40,
+ include_crunchrun=(record.get("exit_code") is None or record.get("exit_code") > 127))
self.final_output = record["output"]
outc = arvados.collection.CollectionReader(self.final_output,
@@ -907,3 +945,42 @@ class Runner(Process):
self.arvrunner.output_callback({}, "permanentFail")
else:
self.arvrunner.output_callback(outputs, processStatus)
+
+
+def print_keep_deps_visitor(api, runtimeContext, references, doc_loader, tool):
+ def collect_locators(obj):
+ loc = obj.get("location", "")
+
+ g = arvados.util.keepuri_pattern.match(loc)
+ if g:
+ references.add(g[1])
+
+ if obj.get("class") == "http://arvados.org/cwl#WorkflowRunnerResources" and "acrContainerImage" in obj:
+ references.add(obj["acrContainerImage"])
+
+ if obj.get("class") == "DockerRequirement":
+ references.add(arvados_cwl.arvdocker.arv_docker_get_image(api, obj, False, runtimeContext))
+
+ sc_result = scandeps(tool["id"], tool,
+ set(),
+ set(("location", "id")),
+ None, urljoin=doc_loader.fetcher.urljoin,
+ nestdirs=False)
+
+ visit_class(sc_result, ("File", "Directory"), collect_locators)
+ visit_class(tool, ("DockerRequirement", "http://arvados.org/cwl#WorkflowRunnerResources"), collect_locators)
+
+
+def print_keep_deps(arvRunner, runtimeContext, merged_map, tool):
+ references = set()
+
+ tool.visit(partial(print_keep_deps_visitor, arvRunner.api, runtimeContext, references, tool.doc_loader))
+
+ for mm in merged_map:
+ for k, v in merged_map[mm].resolved.items():
+ g = arvados.util.keepuri_pattern.match(v)
+ if g:
+ references.add(g[1])
+
+ json.dump(sorted(references), arvRunner.stdout)
+ print(file=arvRunner.stdout)
diff --git a/sdk/cwl/arvados_cwl/util.py b/sdk/cwl/arvados_cwl/util.py
index a0dfb290c1..299f854ec2 100644
--- a/sdk/cwl/arvados_cwl/util.py
+++ b/sdk/cwl/arvados_cwl/util.py
@@ -18,7 +18,7 @@ def get_intermediate_collection_info(workflow_step_name, current_container, inte
container_uuid = None
props = {"type": "intermediate"}
if current_container:
- props["container"] = current_container['uuid']
+ props["container_uuid"] = current_container['uuid']
return {"name" : name, "trash_at" : trash_time, "properties" : props}
@@ -34,3 +34,18 @@ def get_current_container(api, num_retries=0, logger=None):
raise e
return current_container
+
+
+def common_prefix(firstfile, all_files):
+ common_parts = firstfile.split('/')
+ common_parts[-1] = ''
+ for f in all_files:
+ f_parts = f.split('/')
+ for index, (a, b) in enumerate(zip(common_parts, f_parts)):
+ if a != b:
+ common_parts = common_parts[:index + 1]
+ common_parts[-1] = ''
+ break
+ if not any(common_parts):
+ break
+ return '/'.join(common_parts)
diff --git a/sdk/cwl/arvados_version.py b/sdk/cwl/arvados_version.py
index c3936617f0..794b6afe42 100644
--- a/sdk/cwl/arvados_version.py
+++ b/sdk/cwl/arvados_version.py
@@ -1,62 +1,145 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+#
+# This file runs in one of three modes:
+#
+# 1. If the ARVADOS_BUILDING_VERSION environment variable is set, it writes
+# _version.py and generates dependencies based on that value.
+# 2. If running from an arvados Git checkout, it writes _version.py
+# and generates dependencies from Git.
+# 3. Otherwise, we expect this is source previously generated from Git, and
+# it reads _version.py and generates dependencies from it.
-import subprocess
-import time
import os
import re
+import runpy
+import subprocess
import sys
-SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
-VERSION_PATHS = {
- SETUP_DIR,
- os.path.abspath(os.path.join(SETUP_DIR, "../python")),
- os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
- }
+from pathlib import Path
+
+# These maps explain the relationships between different Python modules in
+# the arvados repository. We use these to help generate setup.py.
+PACKAGE_DEPENDENCY_MAP = {
+ 'arvados-cwl-runner': ['arvados-python-client', 'crunchstat_summary'],
+ 'arvados-user-activity': ['arvados-python-client'],
+ 'arvados_fuse': ['arvados-python-client'],
+ 'crunchstat_summary': ['arvados-python-client'],
+}
+PACKAGE_MODULE_MAP = {
+ 'arvados-cwl-runner': 'arvados_cwl',
+ 'arvados-docker-cleaner': 'arvados_docker',
+ 'arvados-python-client': 'arvados',
+ 'arvados-user-activity': 'arvados_user_activity',
+ 'arvados_fuse': 'arvados_fuse',
+ 'crunchstat_summary': 'crunchstat_summary',
+}
+PACKAGE_SRCPATH_MAP = {
+ 'arvados-cwl-runner': Path('sdk', 'cwl'),
+ 'arvados-docker-cleaner': Path('services', 'dockercleaner'),
+ 'arvados-python-client': Path('sdk', 'python'),
+ 'arvados-user-activity': Path('tools', 'user-activity'),
+ 'arvados_fuse': Path('services', 'fuse'),
+ 'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+}
+
+ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
+SETUP_DIR = Path(__file__).absolute().parent
+try:
+ REPO_PATH = Path(subprocess.check_output(
+ ['git', '-C', str(SETUP_DIR), 'rev-parse', '--show-toplevel'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).rstrip('\n'))
+except (subprocess.CalledProcessError, OSError):
+ REPO_PATH = None
+else:
+ # Verify this is the arvados monorepo
+ if all((REPO_PATH / path).exists() for path in PACKAGE_SRCPATH_MAP.values()):
+ PACKAGE_NAME, = (
+ pkg_name for pkg_name, path in PACKAGE_SRCPATH_MAP.items()
+ if (REPO_PATH / path) == SETUP_DIR
+ )
+ MODULE_NAME = PACKAGE_MODULE_MAP[PACKAGE_NAME]
+ VERSION_SCRIPT_PATH = Path(REPO_PATH, 'build', 'version-at-commit.sh')
+ else:
+ REPO_PATH = None
+if REPO_PATH is None:
+ (PACKAGE_NAME, MODULE_NAME), = (
+ (pkg_name, mod_name)
+ for pkg_name, mod_name in PACKAGE_MODULE_MAP.items()
+ if (SETUP_DIR / mod_name).is_dir()
+ )
+
+def short_tests_only(arglist=sys.argv):
+ try:
+ arglist.remove('--short-tests-only')
+ except ValueError:
+ return False
+ else:
+ return True
+
+def git_log_output(path, *args):
+ return subprocess.check_output(
+ ['git', '-C', str(REPO_PATH),
+ 'log', '--first-parent', '--max-count=1',
+ *args, str(path)],
+ text=True,
+ ).rstrip('\n')
def choose_version_from():
- ts = {}
- for path in VERSION_PATHS:
- ts[subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', path]).strip()] = path
-
- sorted_ts = sorted(ts.items())
- getver = sorted_ts[-1][1]
- print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ ver_paths = [SETUP_DIR, VERSION_SCRIPT_PATH, *(
+ PACKAGE_SRCPATH_MAP[pkg]
+ for pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ())
+ )]
+ getver = max(ver_paths, key=lambda path: git_log_output(path, '--format=format:%ct'))
+ print(f"Using {getver} for version number calculation of {SETUP_DIR}", file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
- myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
- '--format=%H', curdir]).strip()
- myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
- return myversion
+ myhash = git_log_output(curdir, '--format=%H')
+ return subprocess.check_output(
+ [str(VERSION_SCRIPT_PATH), myhash],
+ text=True,
+ ).rstrip('\n')
def save_version(setup_dir, module, v):
- v = v.replace("~dev", ".dev").replace("~rc", "rc")
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ with Path(setup_dir, module, '_version.py').open('w') as fp:
+ print(f"__version__ = {v!r}", file=fp)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
-
-def get_version(setup_dir, module):
- env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+ file_vars = runpy.run_path(Path(setup_dir, module, '_version.py'))
+ return file_vars['__version__']
- if env_version:
- save_version(setup_dir, module, env_version)
+def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
+ if ENV_VERSION:
+ version = ENV_VERSION
+ elif REPO_PATH is None:
+ return read_version(setup_dir, module)
else:
- try:
- save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError) as err:
- print("ERROR: {0}".format(err), file=sys.stderr)
- pass
+ version = git_version_at_commit()
+ version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ save_version(setup_dir, module, version)
+ return version
- return read_version(setup_dir, module)
+def iter_dependencies(version=None):
+ if version is None:
+ version = get_version()
+ # A packaged development release should be installed with other
+ # development packages built from the same source, but those
+ # dependencies may have earlier "dev" versions (read: less recent
+ # Git commit timestamps). This compatible version dependency
+ # expresses that as closely as possible. Allowing versions
+ # compatible with .dev0 allows any development release.
+ # Regular expression borrowed partially from
+ #
+ dep_ver, match_count = re.subn(r'\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)
+ dep_op = '~=' if match_count else '=='
+ for dep_pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ()):
+ yield f'{dep_pkg}{dep_op}{dep_ver}'
# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
if __name__ == '__main__':
- print(get_version(SETUP_DIR, "arvados_cwl"))
+ print(get_version())
diff --git a/sdk/cwl/setup.py b/sdk/cwl/setup.py
index 66cda19f40..829dbf0544 100644
--- a/sdk/cwl/setup.py
+++ b/sdk/cwl/setup.py
@@ -9,16 +9,9 @@ import sys
from setuptools import setup, find_packages
-SETUP_DIR = os.path.dirname(__file__) or '.'
-README = os.path.join(SETUP_DIR, 'README.rst')
-
import arvados_version
-version = arvados_version.get_version(SETUP_DIR, "arvados_cwl")
-if os.environ.get('ARVADOS_BUILDING_VERSION', False):
- pysdk_dep = "=={}".format(version)
-else:
- # On dev releases, arvados-python-client may have a different timestamp
- pysdk_dep = "<={}".format(version)
+version = arvados_version.get_version()
+README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
setup(name='arvados-cwl-runner',
version=version,
@@ -36,25 +29,19 @@ setup(name='arvados-cwl-runner',
# file to determine what version of cwltool and schema-salad to
# build.
install_requires=[
- 'cwltool==3.1.20220623174452',
- 'schema-salad==8.3.20220801194920',
- 'arvados-python-client{}'.format(pysdk_dep),
- 'setuptools',
+ *arvados_version.iter_dependencies(version),
+ 'cwltool==3.1.20230601100705',
+ 'schema-salad==8.4.20230601112322',
'ciso8601 >= 2.0.0',
- 'networkx < 2.6',
- 'msgpack==1.0.3'
+ 'setuptools>=40.3.0',
],
data_files=[
('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
],
- python_requires=">=3.5, <4",
+ python_requires="~=3.8",
classifiers=[
'Programming Language :: Python :: 3',
],
test_suite='tests',
- tests_require=[
- 'mock>=1.0,<4',
- 'subprocess32>=3.5.1',
- ],
zip_safe=True,
)
diff --git a/sdk/cwl/test_with_arvbox.sh b/sdk/cwl/test_with_arvbox.sh
index 354d6f0e56..51d64b3f84 100755
--- a/sdk/cwl/test_with_arvbox.sh
+++ b/sdk/cwl/test_with_arvbox.sh
@@ -5,8 +5,10 @@
set -x
+cwldir=$(readlink -f $(dirname $0))
+
if ! which arvbox >/dev/null ; then
- export PATH=$PATH:$(readlink -f $(dirname $0)/../../tools/arvbox/bin)
+ export PATH=$PATH:$cwldir/../../tools/arvbox/bin
fi
reset_container=1
@@ -14,9 +16,9 @@ leave_running=0
config=dev
devcwl=0
tag="latest"
-pythoncmd=python3
suite=conformance
runapi=containers
+reinstall=0
while test -n "$1" ; do
arg="$1"
@@ -45,8 +47,12 @@ while test -n "$1" ; do
devcwl=1
shift
;;
+ --reinstall)
+ reinstall=1
+ shift
+ ;;
--pythoncmd)
- pythoncmd=$2
+ echo "warning: --pythoncmd option is no longer supported; ignored" >&2
shift ; shift
;;
--suite)
@@ -58,7 +64,7 @@ while test -n "$1" ; do
shift ; shift
;;
-h|--help)
- echo "$0 [--no-reset-container] [--leave-running] [--config dev|localdemo] [--tag docker_tag] [--build] [--pythoncmd python(2|3)] [--suite (integration|conformance-v1.0|conformance-*)]"
+ echo "$0 [--no-reset-container] [--leave-running] [--config dev|localdemo] [--tag docker_tag] [--build] [--suite (integration|conformance-v1.0|conformance-*)]"
exit
;;
*)
@@ -87,28 +93,24 @@ arvbox start $config $tag
# of using the one inside the container, so we can make changes to the
# integration tests without necessarily having to rebuilding the
# container image.
-docker cp -L $(readlink -f $(dirname $0)/tests) $ARVBOX_CONTAINER:/usr/src/arvados/sdk/cwl
+docker cp -L $cwldir/tests $ARVBOX_CONTAINER:/usr/src/arvados/sdk/cwl
arvbox pipe < /dev/null ; then
fi
# Use the python executor associated with the installed OS package, if present.
-python=$(((ls /usr/share/python3*/dist/python3-arvados-cwl-runner/bin/python || echo python3) | head -n1) 2>/dev/null)
+python="$(PATH="/usr/lib/python3-arvados-cwl-runner/bin:/opt/arvados-py/bin:$PATH" command -v python3)"
# Test for #18888
# This is a standalone test because the bug was observed with this
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
index 2f309cfe81..cb4a151f0e 100644
--- a/sdk/cwl/tests/arvados-tests.yml
+++ b/sdk/cwl/tests/arvados-tests.yml
@@ -224,7 +224,7 @@
out: null
tool: wf-defaults/wf4.cwl
doc: default in embedded subworkflow missing 'id' field, v1.0
- should_fail: true
+ should_fail: false
- job: null
output:
@@ -469,3 +469,39 @@
}
tool: 19109-upload-secondary.cwl
doc: "Test issue 19109 - correctly discover & upload secondary files"
+
+- job: 19678-name-id.yml
+ output: {
+ "processed_name": {
+ "first": "foo",
+ "last": "bar"
+ }
+ }
+ tool: 19678-name-id.cwl
+ doc: "Test issue 19678 - non-string type input parameter called 'name'"
+
+- job: oom/fakeoom.yml
+ output: {}
+ tool: oom/19975-oom.cwl
+ doc: "Test feature 19975 - retry on exit 137"
+
+- job: oom/fakeoom.yml
+ output: {}
+ tool: oom/19975-oom-mispelled.cwl
+ doc: "Test feature 19975 - retry on exit 137, old misspelled version"
+
+- job: oom/fakeoom2.yml
+ output: {}
+ tool: oom/19975-oom.cwl
+ doc: "Test feature 19975 - retry on memory error"
+
+- job: oom/fakeoom3.yml
+ output: {}
+ tool: oom/19975-oom3.cwl
+ doc: "Test feature 19975 - retry on custom error"
+
+- job: null
+ output:
+ out: out
+ tool: wf/runseparate-wf.cwl
+ doc: "test arv:SeparateRunner"
diff --git a/apps/workbench/app/views/application/_extra_tab_line_buttons.html.erb b/sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.dict
similarity index 100%
rename from apps/workbench/app/views/application/_extra_tab_line_buttons.html.erb
rename to sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.dict
diff --git a/apps/workbench/app/views/application/_index.html.erb b/sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.fa.fai
similarity index 100%
rename from apps/workbench/app/views/application/_index.html.erb
rename to sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.fa.fai
diff --git a/apps/workbench/app/views/application/_tab_line_buttons.html.erb b/sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.dict
similarity index 100%
rename from apps/workbench/app/views/application/_tab_line_buttons.html.erb
rename to sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.dict
diff --git a/apps/workbench/lib/assets/.gitkeep b/sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.fa.fai
similarity index 100%
rename from apps/workbench/lib/assets/.gitkeep
rename to sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.fa.fai
diff --git a/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl b/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl
index f864f49206..a7159e3f8a 100644
--- a/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl
+++ b/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.2
class: Workflow
inputs: []
outputs: []
@@ -14,4 +14,4 @@ steps:
step2:
in: []
out: []
- run: step2.cwl
\ No newline at end of file
+ run: step2.cwl
diff --git a/sdk/cwl/tests/collection_per_tool/collection_per_tool_wrapper.cwl b/sdk/cwl/tests/collection_per_tool/collection_per_tool_wrapper.cwl
new file mode 100644
index 0000000000..847c80945e
--- /dev/null
+++ b/sdk/cwl/tests/collection_per_tool/collection_per_tool_wrapper.cwl
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ],
+ "id": "#main",
+ "inputs": [],
+ "outputs": [],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/collection_per_tool.cwl",
+ "in": [],
+ "label": "collection_per_tool.cwl",
+ "out": [],
+ "run": "keep:177002db236f41230905621862cc4230+367/collection_per_tool.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
diff --git a/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt
new file mode 100644
index 0000000000..e8e79cc59a
--- /dev/null
+++ b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt
@@ -0,0 +1,10 @@
+2018-10-03T18:21:16.944508412Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get
+2018-10-03T18:21:16.944508412Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx
+2018-10-03T18:21:16.944508412Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss
+2018-10-03T18:21:16.944508412Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:16.944508412Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:26.954764471Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get
+2018-10-03T18:21:26.954764471Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx
+2018-10-03T18:21:26.954764471Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss
+2018-10-03T18:21:26.954764471Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:26.954764471Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
diff --git a/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt
new file mode 100644
index 0000000000..6580843b16
--- /dev/null
+++ b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt
@@ -0,0 +1,17 @@
+2018-10-03T18:21:07.823780191Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cgroup.procs
+2018-10-03T18:21:07.823841282Z notice: monitoring temp dir /tmp/crunch-run.9tee4-dz642-lymtndkpy39eibk.438029160
+2018-10-03T18:21:07.823917514Z notice: reading stats from /sys/fs/cgroup/memory//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/memory.stat
+2018-10-03T18:21:07.824136521Z mem 0 cache 0 swap 0 pgmajfault 1187840 rss
+2018-10-03T18:21:07.824187182Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuacct.stat
+2018-10-03T18:21:07.824253726Z notice: reading stats from /sys/fs/cgroup/cpuset//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuset.cpus
+2018-10-03T18:21:07.824296720Z cpu 0.0000 user 0.0100 sys 20.00 cpus
+2018-10-03T18:21:07.824361476Z notice: reading stats from /sys/fs/cgroup/blkio//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/blkio.io_service_bytes
+2018-10-03T18:21:07.824551021Z statfs 397741461504 available 4869779456 used 402611240960 total
+2018-10-03T18:21:17.824503045Z mem 172032 cache 0 swap 0 pgmajfault 68247552 rss
+2018-10-03T18:21:17.824702097Z cpu 2.0000 user 0.3800 sys 20.00 cpus -- interval 10.0004 seconds 2.0000 user 0.3700 sys
+2018-10-03T18:21:17.824984621Z net:eth0 51930 tx 844687 rx
+2018-10-03T18:21:17.825021992Z statfs 397740937216 available 4870303744 used 402611240960 total -- interval 10.0005 seconds 524288 used
+2018-10-03T18:21:27.824480114Z mem 172032 cache 0 swap 0 pgmajfault 69525504 rss
+2018-10-03T18:21:27.826909728Z cpu 2.0600 user 0.3900 sys 20.00 cpus -- interval 10.0022 seconds 0.0600 user 0.0100 sys
+2018-10-03T18:21:27.827141860Z net:eth0 55888 tx 859480 rx -- interval 10.0022 seconds 3958 tx 14793 rx
+2018-10-03T18:21:27.827177703Z statfs 397744787456 available 4866453504 used 402611240960 total -- interval 10.0022 seconds -3850240 used
diff --git a/sdk/cwl/tests/oom/19975-oom-mispelled.cwl b/sdk/cwl/tests/oom/19975-oom-mispelled.cwl
new file mode 100644
index 0000000000..bbd26b9c9a
--- /dev/null
+++ b/sdk/cwl/tests/oom/19975-oom-mispelled.cwl
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+hints:
+ arv:OutOfMemoryRetry:
+ # legacy misspelled name, should behave exactly the same
+ memoryRetryMultipler: 2
+ ResourceRequirement:
+ ramMin: 256
+ arv:APIRequirement: {}
+inputs:
+ fakeoom: File
+outputs: []
+arguments: [python3, $(inputs.fakeoom)]
diff --git a/sdk/cwl/tests/oom/19975-oom.cwl b/sdk/cwl/tests/oom/19975-oom.cwl
new file mode 100644
index 0000000000..bf3e5cc389
--- /dev/null
+++ b/sdk/cwl/tests/oom/19975-oom.cwl
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+hints:
+ arv:OutOfMemoryRetry:
+ memoryRetryMultiplier: 2
+ ResourceRequirement:
+ ramMin: 256
+ arv:APIRequirement: {}
+inputs:
+ fakeoom: File
+outputs: []
+arguments: [python3, $(inputs.fakeoom)]
diff --git a/sdk/cwl/tests/oom/19975-oom3.cwl b/sdk/cwl/tests/oom/19975-oom3.cwl
new file mode 100644
index 0000000000..bbca110b6f
--- /dev/null
+++ b/sdk/cwl/tests/oom/19975-oom3.cwl
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+hints:
+ arv:OutOfMemoryRetry:
+ memoryRetryMultiplier: 2
+ memoryErrorRegex: Whoops
+ ResourceRequirement:
+ ramMin: 256
+ arv:APIRequirement: {}
+inputs:
+ fakeoom: File
+outputs: []
+arguments: [python3, $(inputs.fakeoom)]
diff --git a/sdk/cwl/tests/oom/fakeoom.py b/sdk/cwl/tests/oom/fakeoom.py
new file mode 100644
index 0000000000..cc0b2ed48e
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom.py
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+import time
+import arvados
+
+api = arvados.api()
+current_container = api.containers().current().execute()
+
+if current_container["runtime_constraints"]["ram"] < (512*1024*1024):
+ sys.exit(137)
diff --git a/sdk/cwl/tests/oom/fakeoom.yml b/sdk/cwl/tests/oom/fakeoom.yml
new file mode 100644
index 0000000000..da95fb6be7
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom.yml
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+fakeoom:
+ class: File
+ location: fakeoom.py
diff --git a/sdk/cwl/tests/oom/fakeoom2.py b/sdk/cwl/tests/oom/fakeoom2.py
new file mode 100644
index 0000000000..89bd1f5c3b
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom2.py
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+import time
+import arvados
+
+api = arvados.api()
+current_container = api.containers().current().execute()
+
+if current_container["runtime_constraints"]["ram"] < (512*1024*1024):
+ raise MemoryError()
diff --git a/sdk/cwl/tests/oom/fakeoom2.yml b/sdk/cwl/tests/oom/fakeoom2.yml
new file mode 100644
index 0000000000..4161252e5d
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom2.yml
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+fakeoom:
+ class: File
+ location: fakeoom2.py
diff --git a/sdk/cwl/tests/oom/fakeoom3.py b/sdk/cwl/tests/oom/fakeoom3.py
new file mode 100644
index 0000000000..460c4a5844
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom3.py
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+import time
+import arvados
+
+api = arvados.api()
+current_container = api.containers().current().execute()
+
+if current_container["runtime_constraints"]["ram"] < (512*1024*1024):
+ print("Whoops")
+ sys.exit(1)
diff --git a/sdk/cwl/tests/oom/fakeoom3.yml b/sdk/cwl/tests/oom/fakeoom3.yml
new file mode 100644
index 0000000000..a6fc03ce46
--- /dev/null
+++ b/sdk/cwl/tests/oom/fakeoom3.yml
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+fakeoom:
+ class: File
+ location: fakeoom3.py
diff --git a/sdk/cwl/tests/test_container.py b/sdk/cwl/tests/test_container.py
index cb57b446da..af8c0b014d 100644
--- a/sdk/cwl/tests/test_container.py
+++ b/sdk/cwl/tests/test_container.py
@@ -8,11 +8,10 @@ from builtins import object
import arvados_cwl
import arvados_cwl.context
import arvados_cwl.util
-from arvados_cwl.arvdocker import arv_docker_clear_cache
+#from arvados_cwl.arvdocker import arv_docker_clear_cache
import copy
import arvados.config
import logging
-import mock
import unittest
import os
import functools
@@ -23,6 +22,9 @@ import cwltool.load_tool
from cwltool.update import INTERNAL_VERSION
from schema_salad.ref_resolver import Loader
from schema_salad.sourceline import cmap
+import io
+
+from unittest import mock
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
@@ -61,7 +63,7 @@ class TestContainer(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
def tearDown(self):
root_logger = logging.getLogger('')
@@ -85,7 +87,8 @@ class TestContainer(unittest.TestCase):
"construct_tool_object": runner.arv_make_tool,
"fetcher_constructor": functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=fs_access),
"loader": Loader({}),
- "metadata": cmap({"cwlVersion": INTERNAL_VERSION, "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"})
+ "metadata": cmap({"cwlVersion": INTERNAL_VERSION, "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"}),
+ "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__
})
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
@@ -128,13 +131,14 @@ class TestContainer(unittest.TestCase):
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_run(self, keepdocker):
for enable_reuse in (True, False):
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -186,7 +190,7 @@ class TestContainer(unittest.TestCase):
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}))
@@ -201,6 +205,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 3600
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -277,7 +282,7 @@ class TestContainer(unittest.TestCase):
'scheduling_parameters': {
'partitions': ['blurb']
},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}
@@ -298,6 +303,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -410,7 +416,7 @@ class TestContainer(unittest.TestCase):
'cwd': '/var/spool/cwl',
'scheduling_parameters': {
},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}
@@ -429,6 +435,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -497,7 +504,7 @@ class TestContainer(unittest.TestCase):
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}))
@@ -513,11 +520,47 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
+ runner.api.container_requests().get().execute.return_value = {"container_uuid":"zzzzz-xvhdp-zzzzzzzzzzzzzzz"}
+
runner.api.containers().get().execute.return_value = {"state":"Complete",
"output": "abc+123",
"exit_code": 0}
- col().open.return_value = []
+ # Need to noop-out the close method otherwise it gets
+ # discarded when closed and we can't call getvalue() to check
+ # it.
+ class NoopCloseStringIO(io.StringIO):
+ def close(self):
+ pass
+
+ usage_report = NoopCloseStringIO()
+ def colreader_action(name, mode):
+ nonlocal usage_report
+ if name == "node.json":
+ return io.StringIO("""{
+ "ProviderType": "c5.large",
+ "VCPUs": 2,
+ "RAM": 4294967296,
+ "IncludedScratch": 8000000000000,
+ "AddedScratch": 0,
+ "Price": 0.085,
+ "Preemptible": false,
+ "CUDA": {
+ "DriverVersion": "",
+ "HardwareCapability": "",
+ "DeviceCount": 0
+ }
+}""")
+    if name == 'crunchstat.txt':
+        return open("tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt", "rt")
+    if name == 'arv-mount.txt':
+        return open("tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt", "rt")
+ if name == 'usage_report.html':
+ return usage_report
+ return None
+
+ col().open.side_effect = colreader_action
+ col().__iter__.return_value = ['node.json', 'crunchstat.txt', 'arv-mount.txt']
loadingContext, runtimeContext = self.helper(runner)
@@ -534,6 +577,7 @@ class TestContainer(unittest.TestCase):
arvjob.successCodes = [0]
arvjob.outdir = "/var/spool/cwl"
arvjob.output_ttl = 3600
+ arvjob.uuid = "zzzzz-xvhdp-zzzzzzzzzzzzzz1"
arvjob.collect_outputs.return_value = {"out": "stuff"}
@@ -543,16 +587,25 @@ class TestContainer(unittest.TestCase):
"output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
"container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
- "modified_at": "2017-05-26T12:01:22Z"
+ "modified_at": "2017-05-26T12:01:22Z",
+ "properties": {},
+ "name": "testjob"
})
self.assertFalse(api.collections().create.called)
self.assertFalse(runner.runtime_status_error.called)
+ # Assert that something was written to the usage report
+ self.assertTrue(len(usage_report.getvalue()) > 0)
+
arvjob.collect_outputs.assert_called_with("keep:abc+123", 0)
arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
runner.add_intermediate_output.assert_called_with("zzzzz-4zz18-zzzzzzzzzzzzzz2")
+ runner.api.container_requests().update.assert_called_with(uuid="zzzzz-xvhdp-zzzzzzzzzzzzzz1",
+ body={'container_request': {'properties': {'cwl_output': {'out': 'stuff'}}}})
+
+
# Test to make sure we dont call runtime_status_update if we already did
# some where higher up in the call stack
@mock.patch("arvados_cwl.util.get_current_container")
@@ -636,14 +689,18 @@ class TestContainer(unittest.TestCase):
"output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
"container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
- "modified_at": "2017-05-26T12:01:22Z"
+ "modified_at": "2017-05-26T12:01:22Z",
+ "properties": {}
})
- rts_mock.assert_called_with(
- 'error',
- 'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
- ' ** log is empty **'
- )
+ rts_mock.assert_has_calls([
+ mock.call('error',
+ 'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
+ ' ** log is empty **'
+ ),
+ mock.call('warning',
+ 'arvados.cwl-runner: [container testjob] unable to generate resource usage report'
+ )])
arvjob.output_callback.assert_called_with({"out": "stuff"}, "permanentFail")
# The test passes no builder.resources
@@ -655,6 +712,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -733,7 +791,38 @@ class TestContainer(unittest.TestCase):
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {
+ "p1": {
+ "basename": "99999999999999999999999999999994+44",
+ "class": "Directory",
+ "dirname": "/keep",
+ "http://arvados.org/cwl#collectionUUID": "zzzzz-4zz18-zzzzzzzzzzzzzzz",
+ "listing": [
+ {
+ "basename": "file1",
+ "class": "File",
+ "dirname": "/keep/99999999999999999999999999999994+44",
+ "location": "keep:99999999999999999999999999999994+44/file1",
+ "nameext": "",
+ "nameroot": "file1",
+ "path": "/keep/99999999999999999999999999999994+44/file1",
+ "size": 0
+ },
+ {
+ "basename": "file2",
+ "class": "File",
+ "dirname": "/keep/99999999999999999999999999999994+44",
+ "location": "keep:99999999999999999999999999999994+44/file2",
+ "nameext": "",
+ "nameroot": "file2",
+ "path": "/keep/99999999999999999999999999999994+44/file2",
+ "size": 0
+ }
+ ],
+ "location": "keep:99999999999999999999999999999994+44",
+ "path": "/keep/99999999999999999999999999999994+44"
+ }
+ }},
'secret_mounts': {},
'output_storage_classes': ["default"]
}))
@@ -748,6 +837,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -827,7 +917,7 @@ class TestContainer(unittest.TestCase):
'command': ['md5sum', 'example.conf'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': job_order},
"secret_mounts": {
"/var/spool/cwl/example.conf": {
"content": "username: user\npassword: blorp\n",
@@ -846,6 +936,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -884,13 +975,14 @@ class TestContainer(unittest.TestCase):
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_setting_storage_class(self, keepdocker):
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -949,7 +1041,7 @@ class TestContainer(unittest.TestCase):
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["foo_sc", "bar_sc"]
}))
@@ -959,13 +1051,14 @@ class TestContainer(unittest.TestCase):
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_setting_process_properties(self, keepdocker):
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -1037,6 +1130,7 @@ class TestContainer(unittest.TestCase):
'scheduling_parameters': {},
'properties': {
"baz": "blorp",
+ "cwl_input": {"x": "blorp"},
"foo": "bar",
"quux": {
"q1": 1,
@@ -1053,13 +1147,14 @@ class TestContainer(unittest.TestCase):
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_cuda_requirement(self, keepdocker):
arvados_cwl.add_arv_hints()
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -1145,7 +1240,7 @@ class TestContainer(unittest.TestCase):
'command': ['nvidia-smi'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}))
@@ -1157,13 +1252,13 @@ class TestContainer(unittest.TestCase):
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_match_local_docker(self, keepdocker, determine_image_id):
arvados_cwl.add_arv_hints()
- arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz4", {"dockerhash": "456"}),
("zzzzz-4zz18-zzzzzzzzzzzzzz3", {"dockerhash": "123"})]
@@ -1219,7 +1314,7 @@ class TestContainer(unittest.TestCase):
'command': ['echo'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}
@@ -1230,7 +1325,7 @@ class TestContainer(unittest.TestCase):
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(container_request))
- arv_docker_clear_cache()
+ runtimeContext.cached_docker_lookups.clear()
runtimeContext.match_local_docker = True
container_request['container_image'] = '99999999999999999999999999999993+99'
container_request['name'] = 'test_run_True_2'
@@ -1248,13 +1343,14 @@ class TestContainer(unittest.TestCase):
arvados_cwl.add_arv_hints()
for enable_preemptible in (None, True, False):
for preemptible_hint in (None, True, False):
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": "20210628"}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -1332,7 +1428,7 @@ class TestContainer(unittest.TestCase):
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': sched,
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'secret_mounts': {},
'output_storage_classes': ["default"]
}))
@@ -1347,6 +1443,7 @@ class TestContainer(unittest.TestCase):
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api._rootDesc = {"revision": rev}
+ runner.api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
@@ -1393,7 +1490,7 @@ class TestContainer(unittest.TestCase):
class TestWorkflow(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
- arv_docker_clear_cache()
+ #arv_docker_clear_cache()
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
@@ -1412,7 +1509,8 @@ class TestWorkflow(unittest.TestCase):
"make_fs_access": make_fs_access,
"loader": document_loader,
"metadata": {"cwlVersion": INTERNAL_VERSION, "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"},
- "construct_tool_object": runner.arv_make_tool})
+ "construct_tool_object": runner.arv_make_tool,
+ "default_docker_image": "arvados/jobs:"+arvados_cwl.__version__})
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
"basedir": "",
@@ -1434,6 +1532,7 @@ class TestWorkflow(unittest.TestCase):
api = mock.MagicMock()
api._rootDesc = get_rootDesc()
+ api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
runner = arvados_cwl.executor.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
@@ -1521,7 +1620,19 @@ class TestWorkflow(unittest.TestCase):
"output_path": "/var/spool/cwl",
"output_ttl": 0,
"priority": 500,
- "properties": {},
+ "properties": {'cwl_input': {
+ "fileblub": {
+ "basename": "token.txt",
+ "class": "File",
+ "dirname": "/keep/99999999999999999999999999999999+118",
+ "location": "keep:99999999999999999999999999999999+118/token.txt",
+ "nameext": ".txt",
+ "nameroot": "token",
+ "path": "/keep/99999999999999999999999999999999+118/token.txt",
+ "size": 0
+ },
+ "sleeptime": 5
+ }},
"runtime_constraints": {
"ram": 1073741824,
"vcpus": 1
@@ -1554,6 +1665,7 @@ class TestWorkflow(unittest.TestCase):
api = mock.MagicMock()
api._rootDesc = get_rootDesc()
+ api.config.return_value = {"Containers": {"DefaultKeepCacheRAM": 256<<20}}
runner = arvados_cwl.executor.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
@@ -1594,7 +1706,7 @@ class TestWorkflow(unittest.TestCase):
'name': u'echo-subwf',
'secret_mounts': {},
'runtime_constraints': {'API': True, 'vcpus': 3, 'ram': 1073741824},
- 'properties': {},
+ 'properties': {'cwl_input': {}},
'priority': 500,
'mounts': {
'/var/spool/cwl/cwl.input.yml': {
diff --git a/sdk/cwl/tests/test_copy_deps.py b/sdk/cwl/tests/test_copy_deps.py
index 853a7d3609..28a5915b11 100644
--- a/sdk/cwl/tests/test_copy_deps.py
+++ b/sdk/cwl/tests/test_copy_deps.py
@@ -3,14 +3,46 @@
# SPDX-License-Identifier: Apache-2.0
import arvados
+import arvados.collection
import subprocess
api = arvados.api()
+workflow_content = """# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+baseCommand: echo
+inputs:
+ message:
+ type: File
+ inputBinding:
+ position: 1
+ default:
+ class: File
+ location: keep:d7514270f356df848477718d58308cc4+94/b
+
+outputs: []
+"""
+
+expect_file = "19070-copy-deps.cwl"
+
+def check_workflow_content(uuid):
+ c = arvados.collection.Collection(uuid)
+ try:
+ with c.open(expect_file) as f:
+ content = f.read()
+ match = (content == workflow_content)
+ return match
+ except:
+ return False
+
def check_contents(group, wf_uuid):
contents = api.groups().contents(uuid=group["uuid"]).execute()
- if len(contents["items"]) != 3:
- raise Exception("Expected 3 items in "+group["uuid"]+" was "+len(contents["items"]))
+ if len(contents["items"]) != 4:
+ raise Exception("Expected 4 items in "+group["uuid"]+" was "+str(len(contents["items"])))
found = False
for c in contents["items"]:
@@ -33,6 +65,13 @@ def check_contents(group, wf_uuid):
if not found:
raise Exception("Couldn't find jobs image dependency")
+ found = False
+ for c in contents["items"]:
+ if c["kind"] == "arvados#collection" and check_workflow_content(c["portable_data_hash"]):
+ found = True
+ if not found:
+ raise Exception("Couldn't find collection containing expected "+expect_file)
+
def test_create():
group = api.groups().create(body={"group": {"name": "test-19070-project-1", "group_class": "project"}}, ensure_unique_name=True).execute()
@@ -42,7 +81,7 @@ def test_create():
raise Exception("Expected 0 items")
# Create workflow, by default should also copy dependencies
- cmd = ["arvados-cwl-runner", "--create-workflow", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
+ cmd = ["arvados-cwl-runner", "--disable-git", "--create-workflow", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
print(" ".join(cmd))
wf_uuid = subprocess.check_output(cmd)
wf_uuid = wf_uuid.decode("utf-8").strip()
@@ -59,14 +98,14 @@ def test_update():
raise Exception("Expected 0 items")
# Create workflow, but with --no-copy-deps it shouldn't copy anything
- cmd = ["arvados-cwl-runner", "--no-copy-deps", "--create-workflow", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
+ cmd = ["arvados-cwl-runner", "--disable-git", "--no-copy-deps", "--create-workflow", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
print(" ".join(cmd))
wf_uuid = subprocess.check_output(cmd)
wf_uuid = wf_uuid.decode("utf-8").strip()
contents = api.groups().contents(uuid=group["uuid"]).execute()
- if len(contents["items"]) != 1:
- raise Exception("Expected 1 items")
+ if len(contents["items"]) != 2:
+ raise Exception("Expected 2 items")
found = False
for c in contents["items"]:
@@ -75,8 +114,15 @@ def test_update():
if not found:
raise Exception("Couldn't find workflow")
+ found = False
+ for c in contents["items"]:
+ if c["kind"] == "arvados#collection" and check_workflow_content(c["portable_data_hash"]):
+ found = True
+ if not found:
+ raise Exception("Couldn't find collection containing expected "+expect_file)
+
# Updating by default will copy missing items
- cmd = ["arvados-cwl-runner", "--update-workflow", wf_uuid, "19070-copy-deps.cwl"]
+ cmd = ["arvados-cwl-runner", "--disable-git", "--update-workflow", wf_uuid, "19070-copy-deps.cwl"]
print(" ".join(cmd))
wf_uuid = subprocess.check_output(cmd)
wf_uuid = wf_uuid.decode("utf-8").strip()
@@ -94,7 +140,7 @@ def test_execute():
raise Exception("Expected 0 items")
# Execute workflow, shouldn't copy anything.
- cmd = ["arvados-cwl-runner", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
+ cmd = ["arvados-cwl-runner", "--disable-git", "--project-uuid", group["uuid"], "19070-copy-deps.cwl"]
print(" ".join(cmd))
wf_uuid = subprocess.check_output(cmd)
wf_uuid = wf_uuid.decode("utf-8").strip()
@@ -123,7 +169,7 @@ def test_execute():
raise Exception("Didn't expect to find jobs image dependency")
# Execute workflow with --copy-deps
- cmd = ["arvados-cwl-runner", "--project-uuid", group["uuid"], "--copy-deps", "19070-copy-deps.cwl"]
+ cmd = ["arvados-cwl-runner", "--disable-git", "--project-uuid", group["uuid"], "--copy-deps", "19070-copy-deps.cwl"]
print(" ".join(cmd))
wf_uuid = subprocess.check_output(cmd)
wf_uuid = wf_uuid.decode("utf-8").strip()
diff --git a/sdk/cwl/tests/test_fsaccess.py b/sdk/cwl/tests/test_fsaccess.py
index f83612a8b0..c086f0e832 100644
--- a/sdk/cwl/tests/test_fsaccess.py
+++ b/sdk/cwl/tests/test_fsaccess.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_http.py b/sdk/cwl/tests/test_http.py
deleted file mode 100644
index 650b5f0598..0000000000
--- a/sdk/cwl/tests/test_http.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from future import standard_library
-standard_library.install_aliases()
-
-import copy
-import io
-import functools
-import hashlib
-import json
-import logging
-import mock
-import sys
-import unittest
-import datetime
-
-import arvados
-import arvados.collection
-import arvados_cwl
-import arvados_cwl.runner
-import arvados.keep
-
-from .matcher import JsonDiffMatcher, StripYAMLComments
-from .mock_discovery import get_rootDesc
-
-import arvados_cwl.http
-
-import ruamel.yaml as yaml
-
-
-class TestHttpToKeep(unittest.TestCase):
-
- @mock.patch("requests.get")
- @mock.patch("arvados.collection.Collection")
- def test_http_get(self, collectionmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": []
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
- cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {}
- req.iter_content.return_value = ["abc"]
- getmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 15)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
-
- getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
-
- cm.open.assert_called_with("file1.txt", "wb")
- cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
- owner_uuid=None, ensure_unique_name=True)
-
- api.collections().update.assert_has_calls([
- mock.call(uuid=cm.manifest_locator(),
- body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
- ])
-
-
- @mock.patch("requests.get")
- @mock.patch("arvados.collection.CollectionReader")
- def test_http_expires(self, collectionmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": [{
- "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
- "portable_data_hash": "99999999999999999999999999999998+99",
- "properties": {
- 'http://example.com/file1.txt': {
- 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
- 'Expires': 'Tue, 17 May 2018 00:00:00 GMT'
- }
- }
- }]
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
- cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
- cm.keys.return_value = ["file1.txt"]
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {}
- req.iter_content.return_value = ["abc"]
- getmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 16)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
-
- getmock.assert_not_called()
-
-
- @mock.patch("requests.get")
- @mock.patch("arvados.collection.CollectionReader")
- def test_http_cache_control(self, collectionmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": [{
- "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
- "portable_data_hash": "99999999999999999999999999999998+99",
- "properties": {
- 'http://example.com/file1.txt': {
- 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
- 'Cache-Control': 'max-age=172800'
- }
- }
- }]
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
- cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
- cm.keys.return_value = ["file1.txt"]
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {}
- req.iter_content.return_value = ["abc"]
- getmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 16)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
-
- getmock.assert_not_called()
-
-
- @mock.patch("requests.get")
- @mock.patch("requests.head")
- @mock.patch("arvados.collection.Collection")
- def test_http_expired(self, collectionmock, headmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": [{
- "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
- "portable_data_hash": "99999999999999999999999999999998+99",
- "properties": {
- 'http://example.com/file1.txt': {
- 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
- 'Expires': 'Tue, 16 May 2018 00:00:00 GMT'
- }
- }
- }]
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz4"
- cm.portable_data_hash.return_value = "99999999999999999999999999999997+99"
- cm.keys.return_value = ["file1.txt"]
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}
- req.iter_content.return_value = ["def"]
- getmock.return_value = req
- headmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 17)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999997+99/file1.txt")
-
- getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
-
- cm.open.assert_called_with("file1.txt", "wb")
- cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
- owner_uuid=None, ensure_unique_name=True)
-
- api.collections().update.assert_has_calls([
- mock.call(uuid=cm.manifest_locator(),
- body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}}}})
- ])
-
-
- @mock.patch("requests.get")
- @mock.patch("requests.head")
- @mock.patch("arvados.collection.CollectionReader")
- def test_http_etag(self, collectionmock, headmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": [{
- "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
- "portable_data_hash": "99999999999999999999999999999998+99",
- "properties": {
- 'http://example.com/file1.txt': {
- 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
- 'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
- 'ETag': '123456'
- }
- }
- }]
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
- cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
- cm.keys.return_value = ["file1.txt"]
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {
- 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
- 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
- 'ETag': '123456'
- }
- headmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 17)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
-
- getmock.assert_not_called()
- cm.open.assert_not_called()
-
- api.collections().update.assert_has_calls([
- mock.call(uuid=cm.manifest_locator(),
- body={"collection":{"properties": {'http://example.com/file1.txt': {
- 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
- 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
- 'ETag': '123456'
- }}}})
- ])
-
- @mock.patch("requests.get")
- @mock.patch("arvados.collection.Collection")
- def test_http_content_disp(self, collectionmock, getmock):
- api = mock.MagicMock()
-
- api.collections().list().execute.return_value = {
- "items": []
- }
-
- cm = mock.MagicMock()
- cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
- cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
- collectionmock.return_value = cm
-
- req = mock.MagicMock()
- req.status_code = 200
- req.headers = {"Content-Disposition": "attachment; filename=file1.txt"}
- req.iter_content.return_value = ["abc"]
- getmock.return_value = req
-
- utcnow = mock.MagicMock()
- utcnow.return_value = datetime.datetime(2018, 5, 15)
-
- r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
- self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
-
- getmock.assert_called_with("http://example.com/download?fn=/file1.txt", stream=True, allow_redirects=True)
-
- cm.open.assert_called_with("file1.txt", "wb")
- cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Fdownload%3Ffn%3D%2Ffile1.txt",
- owner_uuid=None, ensure_unique_name=True)
-
- api.collections().update.assert_has_calls([
- mock.call(uuid=cm.manifest_locator(),
- body={"collection":{"properties": {"http://example.com/download?fn=/file1.txt": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
- ])
diff --git a/sdk/cwl/tests/test_make_output.py b/sdk/cwl/tests/test_make_output.py
index dd1da0b524..0a4fbdc60f 100644
--- a/sdk/cwl/tests/test_make_output.py
+++ b/sdk/cwl/tests/test_make_output.py
@@ -8,11 +8,12 @@ standard_library.install_aliases()
import functools
import json
import logging
-import mock
import os
import io
import unittest
+from unittest import mock
+
import arvados
import arvados_cwl
import arvados_cwl.executor
diff --git a/sdk/cwl/tests/test_pathmapper.py b/sdk/cwl/tests/test_pathmapper.py
index 194092db7a..1a13fc7079 100644
--- a/sdk/cwl/tests/test_pathmapper.py
+++ b/sdk/cwl/tests/test_pathmapper.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_submit.py b/sdk/cwl/tests/test_submit.py
index a726ec5017..a137325a44 100644
--- a/sdk/cwl/tests/test_submit.py
+++ b/sdk/cwl/tests/test_submit.py
@@ -4,32 +4,22 @@
from future import standard_library
standard_library.install_aliases()
-from builtins import object
-from builtins import str
-from future.utils import viewvalues
import copy
import io
+import itertools
import functools
import hashlib
import json
import logging
-import mock
import sys
import unittest
import cwltool.process
import re
+import os
-from io import BytesIO
-
-# StringIO.StringIO and io.StringIO have different behavior write() is
-# called with both python2 (byte) strings and unicode strings
-# (specifically there's some logging in cwltool that causes trouble).
-# This isn't a problem on python3 because all string are unicode.
-if sys.version_info[0] < 3:
- from StringIO import StringIO
-else:
- from io import StringIO
+from io import BytesIO, StringIO
+from unittest import mock
import arvados
import arvados.collection
@@ -41,11 +31,11 @@ import arvados.keep
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
-import ruamel.yaml as yaml
+import ruamel.yaml
_rootDesc = None
-def stubs(wfname='submit_wf.cwl'):
+def stubs(wfdetails=('submit_wf.cwl', None)):
def outer_wrapper(func, *rest):
@functools.wraps(func)
@mock.patch("arvados_cwl.arvdocker.determine_image_id")
@@ -58,13 +48,23 @@ def stubs(wfname='submit_wf.cwl'):
uuid4, determine_image_id, *args, **kwargs):
class Stubs(object):
pass
+
+ wfname = wfdetails[0]
+ wfpath = wfdetails[1]
+
stubs = Stubs()
stubs.events = events
stubs.keepdocker = keepdocker
uuid4.side_effect = ["df80736f-f14d-4b10-b2e3-03aa27f034bb", "df80736f-f14d-4b10-b2e3-03aa27f034b1",
"df80736f-f14d-4b10-b2e3-03aa27f034b2", "df80736f-f14d-4b10-b2e3-03aa27f034b3",
- "df80736f-f14d-4b10-b2e3-03aa27f034b4", "df80736f-f14d-4b10-b2e3-03aa27f034b5"]
+ "df80736f-f14d-4b10-b2e3-03aa27f034b4", "df80736f-f14d-4b10-b2e3-03aa27f034b5",
+ "df80736f-f14d-4b10-b2e3-03aa27f034b6", "df80736f-f14d-4b10-b2e3-03aa27f034b7",
+ "df80736f-f14d-4b10-b2e3-03aa27f034b8", "df80736f-f14d-4b10-b2e3-03aa27f034b9",
+ "df80736f-f14d-4b10-b2e3-03aa27f034c0", "df80736f-f14d-4b10-b2e3-03aa27f034c1",
+ "df80736f-f14d-4b10-b2e3-03aa27f034c2", "df80736f-f14d-4b10-b2e3-03aa27f034c3",
+ "df80736f-f14d-4b10-b2e3-03aa27f034c4", "df80736f-f14d-4b10-b2e3-03aa27f034c5",
+ "df80736f-f14d-4b10-b2e3-03aa27f034c6", "df80736f-f14d-4b10-b2e3-03aa27f034c7"]
determine_image_id.return_value = None
@@ -130,7 +130,7 @@ def stubs(wfname='submit_wf.cwl'):
return CollectionExecute(created_collections[uuid])
def collection_getstub(created_collections, uuid):
- for v in viewvalues(created_collections):
+ for v in created_collections.values():
if uuid in (v["uuid"], v["portable_data_hash"]):
return CollectionExecute(v)
@@ -174,12 +174,6 @@ def stubs(wfname='submit_wf.cwl'):
stubs.api.collections().create.side_effect = functools.partial(collection_createstub, created_collections)
stubs.api.collections().get.side_effect = functools.partial(collection_getstub, created_collections)
- stubs.expect_job_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
- stubs.api.jobs().create().execute.return_value = {
- "uuid": stubs.expect_job_uuid,
- "state": "Queued",
- }
-
stubs.expect_container_request_uuid = "zzzzz-xvhdp-zzzzzzzzzzzzzzz"
stubs.api.container_requests().create().execute.return_value = {
"uuid": stubs.expect_container_request_uuid,
@@ -187,94 +181,32 @@ def stubs(wfname='submit_wf.cwl'):
"state": "Queued"
}
- stubs.expect_pipeline_template_uuid = "zzzzz-d1hrv-zzzzzzzzzzzzzzz"
- stubs.api.pipeline_templates().create().execute.return_value = {
- "uuid": stubs.expect_pipeline_template_uuid,
- }
- stubs.expect_job_spec = {
- 'runtime_constraints': {
- 'docker_image': '999999999999999999999999999999d3+99',
- 'min_ram_mb_per_node': 1024
- },
- 'script_parameters': {
- 'x': {
- 'basename': 'blorp.txt',
- 'location': 'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
- 'class': 'File'
- },
- 'y': {
- 'basename': '99999999999999999999999999999998+99',
- 'location': 'keep:99999999999999999999999999999998+99',
- 'class': 'Directory'
- },
- 'z': {
- 'basename': 'anonymous',
- "listing": [{
- "basename": "renamed.txt",
- "class": "File",
- "location": "keep:99999999999999999999999999999998+99/file1.txt",
- "size": 0
- }],
- 'class': 'Directory'
- },
- 'cwl:tool': '57ad063d64c60dbddc027791f0649211+60/workflow.cwl#main'
- },
- 'repository': 'arvados',
- 'script_version': 'master',
- 'minimum_script_version': '570509ab4d2ef93d870fd2b1f2eab178afb1bad9',
- 'script': 'cwl-runner'
- }
- stubs.pipeline_component = stubs.expect_job_spec.copy()
- stubs.expect_pipeline_instance = {
- 'name': 'submit_wf.cwl',
- 'state': 'RunningOnServer',
- 'owner_uuid': None,
- "components": {
- "cwl-runner": {
- 'runtime_constraints': {'docker_image': '999999999999999999999999999999d3+99', 'min_ram_mb_per_node': 1024},
- 'script_parameters': {
- 'y': {"value": {'basename': '99999999999999999999999999999998+99', 'location': 'keep:99999999999999999999999999999998+99', 'class': 'Directory'}},
- 'x': {"value": {
- 'basename': 'blorp.txt',
- 'class': 'File',
- 'location': 'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
- "size": 16
- }},
- 'z': {"value": {'basename': 'anonymous', 'class': 'Directory',
- 'listing': [
- {
- 'basename': 'renamed.txt',
- 'class': 'File', 'location':
- 'keep:99999999999999999999999999999998+99/file1.txt',
- 'size': 0
- }
- ]}},
- 'cwl:tool': '57ad063d64c60dbddc027791f0649211+60/workflow.cwl#main',
- 'arv:debug': True,
- 'arv:enable_reuse': True,
- 'arv:on_error': 'continue'
- },
- 'repository': 'arvados',
- 'script_version': 'master',
- 'minimum_script_version': '570509ab4d2ef93d870fd2b1f2eab178afb1bad9',
- 'script': 'cwl-runner',
- 'job': {'state': 'Queued', 'uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'}
- }
- }
- }
- stubs.pipeline_create = copy.deepcopy(stubs.expect_pipeline_instance)
- stubs.expect_pipeline_uuid = "zzzzz-d1hrv-zzzzzzzzzzzzzzz"
- stubs.pipeline_create["uuid"] = stubs.expect_pipeline_uuid
- stubs.pipeline_with_job = copy.deepcopy(stubs.pipeline_create)
- stubs.pipeline_with_job["components"]["cwl-runner"]["job"] = {
- "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
- "state": "Queued"
- }
- stubs.api.pipeline_instances().create().execute.return_value = stubs.pipeline_create
- stubs.api.pipeline_instances().get().execute.return_value = stubs.pipeline_with_job
+ cwd = os.getcwd()
+ filepath = os.path.join(cwd, "tests/wf/submit_wf_wrapper.cwl")
+ with open(filepath) as f:
+ yaml = ruamel.yaml.YAML(typ='rt', pure=True)
+ expect_packed_workflow = yaml.load(f)
+
+ if wfpath is None:
+ wfpath = wfname
- with open("tests/wf/submit_wf_packed.cwl") as f:
- expect_packed_workflow = yaml.round_trip_load(f)
+ gitinfo_workflow = copy.deepcopy(expect_packed_workflow)
+ gitinfo_workflow["$graph"][0]["id"] = "file://%s/tests/wf/%s" % (cwd, wfpath)
+ mocktool = mock.NonCallableMock(tool=gitinfo_workflow["$graph"][0], metadata=gitinfo_workflow)
+
+ stubs.git_info = arvados_cwl.executor.ArvCwlExecutor.get_git_info(mocktool)
+ expect_packed_workflow.update(stubs.git_info)
+
+ stubs.git_props = {"arv:"+k.split("#", 1)[1]: v for k,v in stubs.git_info.items()}
+
+ step_name = "%s (%s)" % (wfpath, stubs.git_props["arv:gitDescribe"])
+ if wfname == wfpath:
+ container_name = "%s (%s)" % (wfpath, stubs.git_props["arv:gitDescribe"])
+ else:
+ container_name = wfname
+
+ expect_packed_workflow["$graph"][0]["steps"][0]["id"] = "#main/"+step_name
+ expect_packed_workflow["$graph"][0]["steps"][0]["label"] = container_name
stubs.expect_container_spec = {
'priority': 500,
@@ -321,12 +253,12 @@ def stubs(wfname='submit_wf.cwl'):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- '--output-name=Output from workflow '+wfname,
+ '--output-name=Output from workflow '+container_name,
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
- 'name': wfname,
+ 'name': container_name,
'container_image': '999999999999999999999999999999d3+99',
- 'output_name': 'Output from workflow '+wfname,
+ 'output_name': 'Output from workflow %s' % (container_name),
'output_path': '/var/spool/cwl',
'cwd': '/var/spool/cwl',
'runtime_constraints': {
@@ -334,8 +266,8 @@ def stubs(wfname='submit_wf.cwl'):
'vcpus': 1,
'ram': (1024+256)*1024*1024
},
+ 'properties': stubs.git_props,
'use_existing': False,
- 'properties': {},
'secret_mounts': {}
}
@@ -359,7 +291,7 @@ class TestSubmit(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
- arvados_cwl.arvdocker.arv_docker_clear_cache()
+ #arvados_cwl.arvdocker.arv_docker_clear_cache()
def tearDown(self):
root_logger = logging.getLogger('')
@@ -369,7 +301,7 @@ class TestSubmit(unittest.TestCase):
root_logger.handlers = handlers
@mock.patch("time.sleep")
- @stubs
+ @stubs()
def test_submit_invalid_runner_ram(self, stubs, tm):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--debug", "--submit-runner-ram=-2048",
@@ -378,7 +310,7 @@ class TestSubmit(unittest.TestCase):
self.assertEqual(exited, 1)
- @stubs
+ @stubs()
def test_submit_container(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -390,7 +322,7 @@ class TestSubmit(unittest.TestCase):
'manifest_text':
'. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
'replication_desired': None,
- 'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+ 'name': 'submit_wf.cwl ('+ stubs.git_props["arv:gitDescribe"] +') input (169f39d466a5438ac4a90e779bf750c7+53)',
}), ensure_unique_name=False),
mock.call(body=JsonDiffMatcher({
'manifest_text':
@@ -408,7 +340,7 @@ class TestSubmit(unittest.TestCase):
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_tool(self, stubs):
# test for issue #16139
exited = arvados_cwl.main(
@@ -420,7 +352,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_no_reuse(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--disable-reuse",
@@ -433,7 +365,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--disable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
expect_container["use_existing"] = False
@@ -444,7 +376,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('submit_wf_no_reuse.cwl')
+ @stubs(('submit_wf_no_reuse.cwl', None))
def test_submit_container_reuse_disabled_by_workflow(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -453,24 +385,19 @@ class TestSubmit(unittest.TestCase):
self.assertEqual(exited, 0)
expect_container = copy.deepcopy(stubs.expect_container_spec)
- expect_container["command"] = [
- 'arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--disable-reuse', "--collection-cache-size=256",
- '--output-name=Output from workflow submit_wf_no_reuse.cwl', '--debug', '--on-error=continue',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["command"] = ["--disable-reuse" if v == "--enable-reuse" else v for v in expect_container["command"]]
expect_container["use_existing"] = False
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["hints"] = [
{
- "class": "http://arvados.org/cwl#ReuseRequirement",
+ "class": "WorkReuse",
"enableReuse": False,
},
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
]
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
- "arv": "http://arvados.org/cwl#",
- "cwltool": "http://commonwl.org/cwltool#"
- }
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:fa5fbf21deb74f9f239daa3f5bb4b902+292/wf/submit_wf_no_reuse.cwl"
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -478,7 +405,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
- @stubs
+ @stubs()
def test_submit_container_on_error(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--on-error=stop",
@@ -490,7 +417,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=stop',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -500,7 +427,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_output_name(self, stubs):
output_name = "test_output_name"
@@ -524,7 +451,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_storage_classes(self, stubs):
exited = arvados_cwl.main(
["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=foo",
@@ -536,7 +463,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- '--output-name=Output from workflow submit_wf.cwl',
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
"--debug",
"--storage-classes=foo", '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -547,7 +474,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_multiple_storage_classes(self, stubs):
exited = arvados_cwl.main(
["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=foo,bar", "--intermediate-storage-classes=baz",
@@ -559,7 +486,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
"--debug",
"--storage-classes=foo,bar", "--intermediate-storage-classes=baz", '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -573,7 +500,7 @@ class TestSubmit(unittest.TestCase):
@mock.patch("cwltool.task_queue.TaskQueue")
@mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
@mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
- @stubs
+ @stubs()
def test_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
final_output_c = arvados.collection.Collection()
make_output.return_value = ({},final_output_c)
@@ -584,17 +511,17 @@ class TestSubmit(unittest.TestCase):
job.side_effect = set_final_output
exited = arvados_cwl.main(
- ["--debug", "--local", "--storage-classes=foo",
+ ["--debug", "--local", "--storage-classes=foo", "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
- make_output.assert_called_with(u'Output of submit_wf.cwl', ['foo'], '', {}, {"out": "zzzzz"})
+ make_output.assert_called_with(u'Output from workflow submit_wf.cwl', ['foo'], '', {}, {"out": "zzzzz"})
self.assertEqual(exited, 0)
@mock.patch("cwltool.task_queue.TaskQueue")
@mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
@mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
- @stubs
+ @stubs()
def test_default_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
final_output_c = arvados.collection.Collection()
make_output.return_value = ({},final_output_c)
@@ -606,17 +533,17 @@ class TestSubmit(unittest.TestCase):
job.side_effect = set_final_output
exited = arvados_cwl.main(
- ["--debug", "--local",
+ ["--debug", "--local", "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
- make_output.assert_called_with(u'Output of submit_wf.cwl', ['default'], '', {}, {"out": "zzzzz"})
+ make_output.assert_called_with(u'Output from workflow submit_wf.cwl', ['default'], '', {}, {"out": "zzzzz"})
self.assertEqual(exited, 0)
@mock.patch("cwltool.task_queue.TaskQueue")
@mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
@mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
- @stubs
+ @stubs()
def test_storage_class_hint_to_make_output_collection(self, stubs, make_output, job, tq):
final_output_c = arvados.collection.Collection()
make_output.return_value = ({},final_output_c)
@@ -627,14 +554,14 @@ class TestSubmit(unittest.TestCase):
job.side_effect = set_final_output
exited = arvados_cwl.main(
- ["--debug", "--local",
+ ["--debug", "--local", "--disable-git",
"tests/wf/submit_storage_class_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
- make_output.assert_called_with(u'Output of submit_storage_class_wf.cwl', ['foo', 'bar'], '', {}, {"out": "zzzzz"})
+ make_output.assert_called_with(u'Output from workflow submit_storage_class_wf.cwl', ['foo', 'bar'], '', {}, {"out": "zzzzz"})
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_output_ttl(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--intermediate-output-ttl", "3600",
@@ -646,7 +573,8 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl", '--debug',
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug',
'--on-error=continue',
"--intermediate-output-ttl=3600",
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -657,7 +585,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_trash_intermediate(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--trash-intermediate",
@@ -670,6 +598,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=continue',
"--trash-intermediate",
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -680,7 +609,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_output_tags(self, stubs):
output_tags = "tag0,tag1,tag2"
@@ -694,7 +623,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
"--output-tags="+output_tags, '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -704,7 +633,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_runner_ram(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-ram=2048",
@@ -713,6 +642,13 @@ class TestSubmit(unittest.TestCase):
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["runtime_constraints"]["ram"] = (2048+256)*1024*1024
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["hints"] = [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources",
+ "ramMin": 2048
+ }
+ ]
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -722,7 +658,7 @@ class TestSubmit(unittest.TestCase):
@mock.patch("arvados.collection.CollectionReader")
@mock.patch("time.sleep")
- @stubs
+ @stubs()
def test_submit_file_keepref(self, stubs, tm, collectionReader):
collectionReader().exists.return_value = True
collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "blorp.txt")
@@ -734,7 +670,7 @@ class TestSubmit(unittest.TestCase):
@mock.patch("arvados.collection.CollectionReader")
@mock.patch("time.sleep")
- @stubs
+ @stubs()
def test_submit_keepref(self, stubs, tm, reader):
with open("tests/wf/expect_arvworkflow.cwl") as f:
reader().open().__enter__().read.return_value = f.read()
@@ -795,13 +731,13 @@ class TestSubmit(unittest.TestCase):
self.assertEqual(exited, 0)
@mock.patch("time.sleep")
- @stubs
+ @stubs()
def test_submit_arvworkflow(self, stubs, tm):
with open("tests/wf/expect_arvworkflow.cwl") as f:
stubs.api.workflows().get().execute.return_value = {"definition": f.read(), "name": "a test workflow"}
exited = arvados_cwl.main(
- ["--submit", "--no-wait", "--api=containers", "--debug",
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--disable-git",
"962eh-7fd4e-gkbzl62qqtfig37", "-x", "XxX"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
@@ -820,6 +756,7 @@ class TestSubmit(unittest.TestCase):
'kind': 'json',
'content': {
'cwlVersion': 'v1.0',
+ 'label': 'a test workflow',
'$graph': [
{
'id': '#main',
@@ -845,8 +782,7 @@ class TestSubmit(unittest.TestCase):
'requirements': [
{
'dockerPull': 'debian:buster-slim',
- 'class': 'DockerRequirement',
- "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
+ 'class': 'DockerRequirement'
}
],
'id': '#submit_tool.cwl',
@@ -870,8 +806,11 @@ class TestSubmit(unittest.TestCase):
'command': ['arvados-cwl-runner', '--local', '--api=containers',
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
- '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+ '--enable-reuse', "--collection-cache-size=256",
+ "--output-name=Output from workflow a test workflow",
+ '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
+ 'output_name': 'Output from workflow a test workflow',
'cwd': '/var/spool/cwl',
'runtime_constraints': {
'API': True,
@@ -891,7 +830,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('hello container 123')
+ @stubs(('hello container 123', 'submit_wf.cwl'))
def test_submit_container_name(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--name=hello container 123",
@@ -906,7 +845,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_missing_input(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -920,7 +859,7 @@ class TestSubmit(unittest.TestCase):
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
self.assertEqual(exited, 1)
- @stubs
+ @stubs()
def test_submit_container_project(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -935,7 +874,8 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
"--eval-timeout=20", "--thread-count=0",
'--enable-reuse', "--collection-cache-size=256",
- "--output-name=Output from workflow submit_wf.cwl", '--debug',
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug',
'--on-error=continue',
'--project-uuid='+project_uuid,
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -946,7 +886,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_eval_timeout(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--eval-timeout=60",
@@ -958,6 +898,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=60.0', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -967,7 +908,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_collection_cache(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--collection-cache-size=500",
@@ -979,6 +920,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=500",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
expect_container["runtime_constraints"]["ram"] = (1024+500)*1024*1024
@@ -989,7 +931,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_thread_count(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--thread-count=20",
@@ -1001,6 +943,7 @@ class TestSubmit(unittest.TestCase):
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=20',
'--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
@@ -1010,7 +953,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_runner_image(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-image=arvados/jobs:123",
@@ -1018,6 +961,12 @@ class TestSubmit(unittest.TestCase):
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
stubs.expect_container_spec["container_image"] = "999999999999999999999999999999d5+99"
+ stubs.expect_container_spec["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["hints"] = [
+ {
+ "acrContainerImage": "999999999999999999999999999999d5+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ]
expect_container = copy.deepcopy(stubs.expect_container_spec)
stubs.api.container_requests().create.assert_called_with(
@@ -1026,7 +975,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_priority(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--priority=669",
@@ -1042,7 +991,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('submit_wf_runner_resources.cwl')
+ @stubs(('submit_wf_runner_resources.cwl', None))
def test_submit_wf_runner_resources(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -1055,24 +1004,20 @@ class TestSubmit(unittest.TestCase):
"vcpus": 2,
"ram": (2000+512) * 2**20
}
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["hints"] = [
{
"class": "http://arvados.org/cwl#WorkflowRunnerResources",
+ "acrContainerImage": "999999999999999999999999999999d3+99",
"coresMin": 2,
"ramMin": 2000,
"keep_cache": 512
}
]
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
- "arv": "http://arvados.org/cwl#",
- }
- expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--enable-reuse', "--collection-cache-size=512",
- '--output-name=Output from workflow submit_wf_runner_resources.cwl',
- '--debug', '--on-error=continue',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ #expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
+ # "arv": "http://arvados.org/cwl#",
+ #}
+ expect_container["command"] = ["--collection-cache-size=512" if v == "--collection-cache-size=256" else v for v in expect_container["command"]]
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:80b60e39456505b91d3989a1f5058b98+308/wf/submit_wf_runner_resources.cwl"
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -1084,50 +1029,44 @@ class TestSubmit(unittest.TestCase):
@mock.patch("cwltool.docker.DockerCommandLineJob.get_image")
@mock.patch("arvados.api")
def test_arvados_jobs_image(self, api, get_image, find_one_image_hash):
- arvados_cwl.arvdocker.arv_docker_clear_cache()
+ #arvados_cwl.arvdocker.arv_docker_clear_cache()
arvrunner = mock.MagicMock()
arvrunner.project_uuid = ""
api.return_value = mock.MagicMock()
arvrunner.api = api.return_value
arvrunner.runtimeContext.match_local_docker = False
- arvrunner.api.links().list().execute.side_effect = ({"items": [{"created_at": "",
- "head_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
- "link_class": "docker_image_repo+tag",
- "name": "arvados/jobs:"+arvados_cwl.__version__,
- "owner_uuid": "",
- "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0},
- {"items": [{"created_at": "",
- "head_uuid": "",
- "link_class": "docker_image_hash",
- "name": "123456",
- "owner_uuid": "",
- "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0},
- {"items": [{"created_at": "",
- "head_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
- "link_class": "docker_image_repo+tag",
- "name": "arvados/jobs:"+arvados_cwl.__version__,
- "owner_uuid": "",
- "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0},
- {"items": [{"created_at": "",
- "head_uuid": "",
- "link_class": "docker_image_hash",
- "name": "123456",
- "owner_uuid": "",
- "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0}
- )
+ arvrunner.api.links().list().execute.side_effect = itertools.cycle([
+ {"items": [{"created_at": "2023-08-25T12:34:56.123456Z",
+ "head_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
+ "link_class": "docker_image_repo+tag",
+ "name": "arvados/jobs:"+arvados_cwl.__version__,
+ "owner_uuid": "",
+ "uuid": "zzzzz-o0j2j-arvadosjobsrepo",
+ "properties": {"image_timestamp": ""}}]},
+ {"items": []},
+ {"items": []},
+ {"items": [{"created_at": "2023-08-25T12:34:57.234567Z",
+ "head_uuid": "",
+ "link_class": "docker_image_hash",
+ "name": "123456",
+ "owner_uuid": "",
+ "uuid": "zzzzz-o0j2j-arvadosjobshash",
+ "properties": {"image_timestamp": ""}}]},
+ {"items": []},
+ {"items": []},
+ ])
find_one_image_hash.return_value = "123456"
- arvrunner.api.collections().list().execute.side_effect = ({"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
- "owner_uuid": "",
- "manifest_text": "",
- "properties": ""
- }], "items_available": 1, "offset": 0},
- {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
- "owner_uuid": "",
- "manifest_text": "",
- "properties": ""
- }], "items_available": 1, "offset": 0})
+ arvrunner.api.collections().list().execute.side_effect = itertools.cycle([
+ {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
+ "owner_uuid": "",
+ "manifest_text": "",
+ "created_at": "2023-08-25T12:34:55.012345Z",
+ "properties": {}}]},
+ {"items": []},
+ {"items": []},
+ ])
arvrunner.api.collections().create().execute.return_value = {"uuid": ""}
arvrunner.api.collections().get().execute.return_value = {"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
"portable_data_hash": "9999999999999999999999999999999b+99"}
@@ -1136,13 +1075,16 @@ class TestSubmit(unittest.TestCase):
arvados_cwl.runner.arvados_jobs_image(arvrunner, "arvados/jobs:"+arvados_cwl.__version__, arvrunner.runtimeContext))
- @stubs
+ @stubs()
def test_submit_secrets(self, stubs):
exited = arvados_cwl.main(
- ["--submit", "--no-wait", "--api=containers", "--debug",
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--disable-git",
"tests/wf/secret_wf.cwl", "tests/secret_test_job.yml"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ stubs.git_props["arv:gitPath"] = "sdk/cwl/tests/wf/secret_wf.cwl"
+ stubs.git_info["http://arvados.org/cwl#gitPath"] = "sdk/cwl/tests/wf/secret_wf.cwl"
+
expect_container = {
"command": [
"arvados-cwl-runner",
@@ -1155,8 +1097,8 @@ class TestSubmit(unittest.TestCase):
'--thread-count=0',
"--enable-reuse",
"--collection-cache-size=256",
- '--output-name=Output from workflow secret_wf.cwl'
- '--debug',
+ "--output-name=Output from workflow secret_wf.cwl",
+ "--debug",
"--on-error=continue",
"/var/lib/cwl/workflow.json#main",
"/var/lib/cwl/cwl.input.json"
@@ -1175,62 +1117,23 @@ class TestSubmit(unittest.TestCase):
"/var/lib/cwl/workflow.json": {
"content": {
"$graph": [
- {
- "arguments": [
- "md5sum",
- "example.conf"
- ],
- "class": "CommandLineTool",
- "hints": [
- {
- "class": "http://commonwl.org/cwltool#Secrets",
- "secrets": [
- "#secret_job.cwl/pw"
- ]
- }
- ],
- "id": "#secret_job.cwl",
- "inputs": [
- {
- "id": "#secret_job.cwl/pw",
- "type": "string"
- }
- ],
- "outputs": [
- {
- "id": "#secret_job.cwl/out",
- "type": "File",
- "outputBinding": {
- "glob": "hashed_example.txt"
- }
- }
- ],
- "stdout": "hashed_example.txt",
- "requirements": [
- {
- "class": "InitialWorkDirRequirement",
- "listing": [
- {
- "entry": "username: user\npassword: $(inputs.pw)\n",
- "entryname": "example.conf"
- }
- ]
- }
- ]
- },
{
"class": "Workflow",
"hints": [
{
- "class": "DockerRequirement",
- "dockerPull": "debian:buster-slim",
- "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
+ "class": "DockerRequirement",
+ "dockerPull": "debian:buster-slim",
+ "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
},
{
"class": "http://commonwl.org/cwltool#Secrets",
"secrets": [
"#main/pw"
]
+ },
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
}
],
"id": "#main",
@@ -1243,31 +1146,34 @@ class TestSubmit(unittest.TestCase):
"outputs": [
{
"id": "#main/out",
- "outputSource": "#main/step1/out",
+ "outputSource": "#main/step/out",
"type": "File"
}
],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
"steps": [
{
- "id": "#main/step1",
+ "id": "#main/secret_wf.cwl",
"in": [
{
- "id": "#main/step1/pw",
+ "id": "#main/step/pw",
"source": "#main/pw"
}
],
+ "label": "secret_wf.cwl",
"out": [
- "#main/step1/out"
+ {"id": "#main/step/out"}
],
- "run": "#secret_job.cwl"
+ "run": "keep:991302581d01db470345a131480e623b+247/secret_wf.cwl"
}
]
}
],
- "$namespaces": {
- "cwltool": "http://commonwl.org/cwltool#"
- },
- "cwlVersion": "v1.0"
+ "cwlVersion": "v1.2"
},
"kind": "json"
},
@@ -1306,7 +1212,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_request_uuid(self, stubs):
stubs.api._rootDesc["remoteHosts"]["zzzzz"] = "123"
stubs.expect_container_request_uuid = "zzzzz-xvhdp-yyyyyyyyyyyyyyy"
@@ -1328,7 +1234,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_container_cluster_id(self, stubs):
stubs.api._rootDesc["remoteHosts"]["zbbbb"] = "123"
@@ -1345,7 +1251,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_validate_cluster_id(self, stubs):
stubs.api._rootDesc["remoteHosts"]["zbbbb"] = "123"
exited = arvados_cwl.main(
@@ -1354,7 +1260,7 @@ class TestSubmit(unittest.TestCase):
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
self.assertEqual(exited, 1)
- @stubs
+ @stubs()
def test_submit_validate_project_uuid(self, stubs):
# Fails with bad cluster prefix
exited = arvados_cwl.main(
@@ -1380,7 +1286,7 @@ class TestSubmit(unittest.TestCase):
@mock.patch("arvados.collection.CollectionReader")
- @stubs
+ @stubs()
def test_submit_uuid_inputs(self, stubs, collectionReader):
collectionReader().exists.return_value = True
collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "file1.txt")
@@ -1394,6 +1300,7 @@ class TestSubmit(unittest.TestCase):
m.execute.return_value = {"items": []}
return m
stubs.api.collections().list.side_effect = list_side_effect
+ collectionReader().portable_data_hash.return_value = "99999999999999999999999999999998+99"
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -1415,7 +1322,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_mismatched_uuid_inputs(self, stubs):
def list_side_effect(**kwargs):
m = mock.MagicMock()
@@ -1448,7 +1355,7 @@ class TestSubmit(unittest.TestCase):
cwltool_logger.removeHandler(stderr_logger)
@mock.patch("arvados.collection.CollectionReader")
- @stubs
+ @stubs()
def test_submit_unknown_uuid_inputs(self, stubs, collectionReader):
collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "file1.txt")
capture_stderr = StringIO()
@@ -1470,7 +1377,7 @@ class TestSubmit(unittest.TestCase):
finally:
cwltool_logger.removeHandler(stderr_logger)
- @stubs('submit_wf_process_properties.cwl')
+ @stubs(('submit_wf_process_properties.cwl', None))
def test_submit_set_process_properties(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
@@ -1479,7 +1386,7 @@ class TestSubmit(unittest.TestCase):
expect_container = copy.deepcopy(stubs.expect_container_spec)
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["hints"] = [
{
"class": "http://arvados.org/cwl#ProcessProperties",
"processProperties": [
@@ -1494,20 +1401,26 @@ class TestSubmit(unittest.TestCase):
}
}
],
+ },
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
}
]
- expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
- "arv": "http://arvados.org/cwl#"
- }
+ #expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
+ # "arv": "http://arvados.org/cwl#"
+ #}
- expect_container["properties"] = {
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["steps"][0]["run"] = "keep:df44f9dd4b9467159f210f967e45417f+312/wf/submit_wf_process_properties.cwl"
+
+ expect_container["properties"].update({
"baz": "blorp.txt",
"foo": "bar",
"quux": {
"q1": 1,
"q2": 2
}
- }
+ })
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -1516,7 +1429,7 @@ class TestSubmit(unittest.TestCase):
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_enable_preemptible(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--enable-preemptible",
@@ -1525,11 +1438,13 @@ class TestSubmit(unittest.TestCase):
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug', '--on-error=continue',
'--enable-preemptible',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -1537,7 +1452,7 @@ class TestSubmit(unittest.TestCase):
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_submit_disable_preemptible(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--disable-preemptible",
@@ -1546,11 +1461,57 @@ class TestSubmit(unittest.TestCase):
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug', '--on-error=continue',
'--disable-preemptible',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ @stubs()
+ def test_submit_container_prefer_cached_downloads(self, stubs):
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--prefer-cached-downloads",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug', "--on-error=continue", '--prefer-cached-downloads',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+ @stubs()
+ def test_submit_container_varying_url_params(self, stubs):
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--varying-url-params", "KeyId,Signature",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256",
+ '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props["arv:gitDescribe"],
+ '--debug', "--on-error=continue", "--varying-url-params=KeyId,Signature",
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
@@ -1562,11 +1523,13 @@ class TestSubmit(unittest.TestCase):
class TestCreateWorkflow(unittest.TestCase):
existing_workflow_uuid = "zzzzz-7fd4e-validworkfloyml"
expect_workflow = StripYAMLComments(
- open("tests/wf/expect_upload_packed.cwl").read().rstrip())
+ open("tests/wf/expect_upload_wrapper.cwl").read().rstrip())
+ expect_workflow_altname = StripYAMLComments(
+ open("tests/wf/expect_upload_wrapper_altname.cwl").read().rstrip())
def setUp(self):
cwltool.process._names = set()
- arvados_cwl.arvdocker.arv_docker_clear_cache()
+ #arvados_cwl.arvdocker.arv_docker_clear_cache()
def tearDown(self):
root_logger = logging.getLogger('')
@@ -1575,7 +1538,7 @@ class TestCreateWorkflow(unittest.TestCase):
handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]
root_logger.handlers = handlers
- @stubs
+ @stubs()
def test_create(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -1584,6 +1547,7 @@ class TestCreateWorkflow(unittest.TestCase):
["--create-workflow", "--debug",
"--api=containers",
"--project-uuid", project_uuid,
+ "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
@@ -1605,7 +1569,7 @@ class TestCreateWorkflow(unittest.TestCase):
stubs.expect_workflow_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_create_name(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -1615,6 +1579,7 @@ class TestCreateWorkflow(unittest.TestCase):
"--api=containers",
"--project-uuid", project_uuid,
"--name", "testing 123",
+ "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
@@ -1626,7 +1591,7 @@ class TestCreateWorkflow(unittest.TestCase):
"owner_uuid": project_uuid,
"name": "testing 123",
"description": "",
- "definition": self.expect_workflow,
+ "definition": self.expect_workflow_altname,
}
}
stubs.api.workflows().create.assert_called_with(
@@ -1637,7 +1602,7 @@ class TestCreateWorkflow(unittest.TestCase):
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_update(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.workflows().get().execute.return_value = {"owner_uuid": project_uuid}
@@ -1645,6 +1610,7 @@ class TestCreateWorkflow(unittest.TestCase):
exited = arvados_cwl.main(
["--update-workflow", self.existing_workflow_uuid,
"--debug",
+ "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
@@ -1664,7 +1630,7 @@ class TestCreateWorkflow(unittest.TestCase):
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_update_name(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.workflows().get().execute.return_value = {"owner_uuid": project_uuid}
@@ -1672,6 +1638,7 @@ class TestCreateWorkflow(unittest.TestCase):
exited = arvados_cwl.main(
["--update-workflow", self.existing_workflow_uuid,
"--debug", "--name", "testing 123",
+ "--disable-git",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
@@ -1679,7 +1646,7 @@ class TestCreateWorkflow(unittest.TestCase):
"workflow": {
"name": "testing 123",
"description": "",
- "definition": self.expect_workflow,
+ "definition": self.expect_workflow_altname,
"owner_uuid": project_uuid
}
}
@@ -1690,7 +1657,7 @@ class TestCreateWorkflow(unittest.TestCase):
self.existing_workflow_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_create_collection_per_tool(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -1699,10 +1666,11 @@ class TestCreateWorkflow(unittest.TestCase):
["--create-workflow", "--debug",
"--api=containers",
"--project-uuid", project_uuid,
+ "--disable-git",
"tests/collection_per_tool/collection_per_tool.cwl"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api)
- toolfile = "tests/collection_per_tool/collection_per_tool_packed.cwl"
+ toolfile = "tests/collection_per_tool/collection_per_tool_wrapper.cwl"
expect_workflow = StripYAMLComments(open(toolfile).read().rstrip())
body = {
@@ -1720,7 +1688,7 @@ class TestCreateWorkflow(unittest.TestCase):
stubs.expect_workflow_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_create_with_imports(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -1739,7 +1707,7 @@ class TestCreateWorkflow(unittest.TestCase):
stubs.expect_workflow_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs
+ @stubs()
def test_create_with_no_input(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
stubs.api.groups().get().execute.return_value = {"group_class": "project"}
@@ -1757,3 +1725,55 @@ class TestCreateWorkflow(unittest.TestCase):
self.assertEqual(stubs.capture_stdout.getvalue(),
stubs.expect_workflow_uuid + '\n')
self.assertEqual(exited, 0)
+
+ @stubs()
+ def test_create_map(self, stubs):
+ # test uploading a document that uses objects instead of arrays
+ # for certain fields like inputs and requirements.
+
+ project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
+
+ exited = arvados_cwl.main(
+ ["--create-workflow", "--debug",
+ "--api=containers",
+ "--project-uuid", project_uuid,
+ "--disable-git",
+ "tests/wf/submit_wf_map.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+ stubs.api.pipeline_templates().create.refute_called()
+ stubs.api.container_requests().create.refute_called()
+
+ expect_workflow = StripYAMLComments(
+ open("tests/wf/expect_upload_wrapper_map.cwl").read().rstrip())
+
+ body = {
+ "workflow": {
+ "owner_uuid": project_uuid,
+ "name": "submit_wf_map.cwl",
+ "description": "",
+ "definition": expect_workflow,
+ }
+ }
+ stubs.api.workflows().create.assert_called_with(
+ body=JsonDiffMatcher(body))
+
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_workflow_uuid + '\n')
+ self.assertEqual(exited, 0)
+
+
+class TestPrintKeepDeps(unittest.TestCase):
+ @stubs()
+ def test_print_keep_deps(self, stubs):
+ # test --print-keep-deps which is used by arv-copy
+
+ exited = arvados_cwl.main(
+ ["--print-keep-deps", "--debug",
+ "tests/wf/submit_wf_map.cwl"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ '["5d373e7629203ce39e7c22af98a0f881+52", "999999999999999999999999999999d4+99"]' + '\n')
+ self.assertEqual(exited, 0)
diff --git a/sdk/cwl/tests/test_tq.py b/sdk/cwl/tests/test_tq.py
index 05e5116d72..bf53f8912e 100644
--- a/sdk/cwl/tests/test_tq.py
+++ b/sdk/cwl/tests/test_tq.py
@@ -3,7 +3,6 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
@@ -11,6 +10,8 @@ import logging
import os
import threading
+from unittest import mock
+
from cwltool.task_queue import TaskQueue
def success_task():
diff --git a/sdk/cwl/tests/test_urljoin.py b/sdk/cwl/tests/test_urljoin.py
index 86a053ea48..08bca55e3d 100644
--- a/sdk/cwl/tests/test_urljoin.py
+++ b/sdk/cwl/tests/test_urljoin.py
@@ -3,13 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import functools
-import mock
import sys
import unittest
import json
import logging
import os
+from unittest import mock
+
import arvados
import arvados.keep
import arvados.collection
diff --git a/sdk/cwl/tests/test_util.py b/sdk/cwl/tests/test_util.py
index 3ca02c7df6..e7159f4102 100644
--- a/sdk/cwl/tests/test_util.py
+++ b/sdk/cwl/tests/test_util.py
@@ -5,12 +5,14 @@
from builtins import bytes
import unittest
-import mock
import datetime
import httplib2
+from unittest import mock
+
from arvados_cwl.util import *
from arvados.errors import ApiError
+from arvados_cwl.util import common_prefix
class MockDateTime(datetime.datetime):
@classmethod
@@ -29,7 +31,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(info["name"], "Intermediate collection for step one")
self.assertEqual(info["trash_at"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))
- self.assertEqual(info["properties"], {"type" : "intermediate", "container" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
+ self.assertEqual(info["properties"], {"type" : "intermediate", "container_uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
def test_get_current_container_success(self):
api = mock.MagicMock()
@@ -53,4 +55,19 @@ class TestUtil(unittest.TestCase):
logger = mock.MagicMock()
current_container = get_current_container(api, num_retries=0, logger=logger)
- self.assertEqual(current_container, None)
\ No newline at end of file
+ self.assertEqual(current_container, None)
+
+ def test_common_prefix(self):
+ self.assertEqual(common_prefix("file:///foo/bar", ["file:///foo/bar/baz"]), "file:///foo/")
+ self.assertEqual(common_prefix("file:///foo", ["file:///foo", "file:///foo/bar", "file:///foo/bar/"]), "file:///")
+ self.assertEqual(common_prefix("file:///foo/", ["file:///foo/", "file:///foo/bar", "file:///foo/bar/"]), "file:///foo/")
+ self.assertEqual(common_prefix("file:///foo/bar", ["file:///foo/bar", "file:///foo/baz", "file:///foo/quux/q2"]), "file:///foo/")
+ self.assertEqual(common_prefix("file:///foo/bar/", ["file:///foo/bar/", "file:///foo/baz", "file:///foo/quux/q2"]), "file:///foo/")
+ self.assertEqual(common_prefix("file:///foo/bar/splat", ["file:///foo/bar/splat", "file:///foo/baz", "file:///foo/quux/q2"]), "file:///foo/")
+ self.assertEqual(common_prefix("file:///foo/bar/splat", ["file:///foo/bar/splat", "file:///nope", "file:///foo/quux/q2"]), "file:///")
+ self.assertEqual(common_prefix("file:///blub/foo", ["file:///blub/foo", "file:///blub/foo/bar", "file:///blub/foo/bar/"]), "file:///blub/")
+
+ # sanity check, the subsequent code strips off the prefix so
+ # just confirm the logic doesn't have a fencepost error
+ prefix = "file:///"
+ self.assertEqual("file:///foo/bar"[len(prefix):], "foo/bar")
diff --git a/sdk/cwl/tests/tool/submit_tool_map.cwl b/sdk/cwl/tests/tool/submit_tool_map.cwl
new file mode 100644
index 0000000000..7a833d471b
--- /dev/null
+++ b/sdk/cwl/tests/tool/submit_tool_map.cwl
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a tool file for dependencies (e.g. default
+# value blub.txt) and uploading to Keep works as intended.
+
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+ DockerRequirement:
+ dockerPull: debian:buster-slim
+inputs:
+ x:
+ type: File
+ default:
+ class: File
+ location: blub.txt
+ inputBinding:
+ position: 1
+outputs: []
+baseCommand: cat
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper.cwl
new file mode 100644
index 0000000000..05599b652d
--- /dev/null
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper.cwl
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ],
+ "id": "#main",
+ "inputs": [
+ {
+ "default": {
+ "basename": "blorp.txt",
+ "class": "File",
+ "location": "keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt",
+ "nameext": ".txt",
+ "nameroot": "blorp",
+ "size": 16
+ },
+ "id": "#main/x",
+ "type": "File"
+ },
+ {
+ "default": {
+ "basename": "99999999999999999999999999999998+99",
+ "class": "Directory",
+ "location": "keep:99999999999999999999999999999998+99"
+ },
+ "id": "#main/y",
+ "type": "Directory"
+ },
+ {
+ "default": {
+ "basename": "anonymous",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "renamed.txt",
+ "class": "File",
+ "location": "keep:99999999999999999999999999999998+99/file1.txt",
+ "nameext": ".txt",
+ "nameroot": "renamed",
+ "size": 0
+ }
+ ]
+ },
+ "id": "#main/z",
+ "type": "Directory"
+ }
+ ],
+ "outputs": [],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/submit_wf.cwl",
+ "in": [
+ {
+ "id": "#main/step/x",
+ "source": "#main/x"
+ },
+ {
+ "id": "#main/step/y",
+ "source": "#main/y"
+ },
+ {
+ "id": "#main/step/z",
+ "source": "#main/z"
+ }
+ ],
+ "label": "submit_wf.cwl",
+ "out": [],
+ "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl
new file mode 100644
index 0000000000..63031110cd
--- /dev/null
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ],
+ "id": "#main",
+ "inputs": [
+ {
+ "default": {
+ "basename": "blorp.txt",
+ "class": "File",
+ "location": "keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt",
+ "nameext": ".txt",
+ "nameroot": "blorp",
+ "size": 16
+ },
+ "id": "#main/x",
+ "type": "File"
+ },
+ {
+ "default": {
+ "basename": "99999999999999999999999999999998+99",
+ "class": "Directory",
+ "location": "keep:99999999999999999999999999999998+99"
+ },
+ "id": "#main/y",
+ "type": "Directory"
+ },
+ {
+ "default": {
+ "basename": "anonymous",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "renamed.txt",
+ "class": "File",
+ "location": "keep:99999999999999999999999999999998+99/file1.txt",
+ "nameext": ".txt",
+ "nameroot": "renamed",
+ "size": 0
+ }
+ ]
+ },
+ "id": "#main/z",
+ "type": "Directory"
+ }
+ ],
+ "outputs": [],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/submit_wf.cwl",
+ "in": [
+ {
+ "id": "#main/step/x",
+ "source": "#main/x"
+ },
+ {
+ "id": "#main/step/y",
+ "source": "#main/y"
+ },
+ {
+ "id": "#main/step/z",
+ "source": "#main/z"
+ }
+ ],
+ "label": "testing 123",
+ "out": [],
+ "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
diff --git a/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl b/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl
new file mode 100644
index 0000000000..8f98f4718c
--- /dev/null
+++ b/sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ],
+ "id": "#main",
+ "inputs": [
+ {
+ "default": {
+ "basename": "blorp.txt",
+ "class": "File",
+ "location": "keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt",
+ "nameext": ".txt",
+ "nameroot": "blorp",
+ "size": 16
+ },
+ "id": "#main/x",
+ "type": "File"
+ },
+ {
+ "default": {
+ "basename": "99999999999999999999999999999998+99",
+ "class": "Directory",
+ "location": "keep:99999999999999999999999999999998+99"
+ },
+ "id": "#main/y",
+ "type": "Directory"
+ },
+ {
+ "default": {
+ "basename": "anonymous",
+ "class": "Directory",
+ "listing": [
+ {
+ "basename": "renamed.txt",
+ "class": "File",
+ "location": "keep:99999999999999999999999999999998+99/file1.txt",
+ "nameext": ".txt",
+ "nameroot": "renamed",
+ "size": 0
+ }
+ ]
+ },
+ "id": "#main/z",
+ "type": "Directory"
+ }
+ ],
+ "outputs": [],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/submit_wf_map.cwl",
+ "in": [
+ {
+ "id": "#main/step/x",
+ "source": "#main/x"
+ },
+ {
+ "id": "#main/step/y",
+ "source": "#main/y"
+ },
+ {
+ "id": "#main/step/z",
+ "source": "#main/z"
+ }
+ ],
+ "label": "submit_wf_map.cwl",
+ "out": [],
+ "run": "keep:2b94b65162db72023301a582e085646f+290/wf/submit_wf_map.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf.cwl b/sdk/cwl/tests/wf/runin-reqs-wf.cwl
index 22cc82b7f3..3e229e6652 100644
--- a/sdk/cwl/tests/wf/runin-reqs-wf.cwl
+++ b/sdk/cwl/tests/wf/runin-reqs-wf.cwl
@@ -15,8 +15,7 @@ inputs:
default:
class: File
location: check_mem.py
-outputs:
- out: []
+outputs: []
requirements:
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf2.cwl b/sdk/cwl/tests/wf/runin-reqs-wf2.cwl
index 4bde6c562c..4301904592 100644
--- a/sdk/cwl/tests/wf/runin-reqs-wf2.cwl
+++ b/sdk/cwl/tests/wf/runin-reqs-wf2.cwl
@@ -15,8 +15,7 @@ inputs:
default:
class: File
location: check_mem.py
-outputs:
- out: []
+outputs: []
requirements:
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf3.cwl b/sdk/cwl/tests/wf/runin-reqs-wf3.cwl
index c13b7a0bc6..b08af0063c 100644
--- a/sdk/cwl/tests/wf/runin-reqs-wf3.cwl
+++ b/sdk/cwl/tests/wf/runin-reqs-wf3.cwl
@@ -15,8 +15,7 @@ inputs:
default:
class: File
location: check_mem.py
-outputs:
- out: []
+outputs: []
requirements:
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf4.cwl b/sdk/cwl/tests/wf/runin-reqs-wf4.cwl
index d00ee85775..747e3c8d9e 100644
--- a/sdk/cwl/tests/wf/runin-reqs-wf4.cwl
+++ b/sdk/cwl/tests/wf/runin-reqs-wf4.cwl
@@ -15,8 +15,7 @@ inputs:
default:
class: File
location: check_mem.py
-outputs:
- out: []
+outputs: []
requirements:
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf5.cwl b/sdk/cwl/tests/wf/runin-reqs-wf5.cwl
index 647b07edfa..bf598b938f 100644
--- a/sdk/cwl/tests/wf/runin-reqs-wf5.cwl
+++ b/sdk/cwl/tests/wf/runin-reqs-wf5.cwl
@@ -15,8 +15,7 @@ inputs:
default:
class: File
location: check_mem.py
-outputs:
- out: []
+outputs: []
requirements:
SubworkflowFeatureRequirement: {}
ScatterFeatureRequirement: {}
diff --git a/sdk/cwl/tests/wf/runseparate-wf.cwl b/sdk/cwl/tests/wf/runseparate-wf.cwl
new file mode 100644
index 0000000000..e4ab627256
--- /dev/null
+++ b/sdk/cwl/tests/wf/runseparate-wf.cwl
@@ -0,0 +1,68 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+inputs:
+ sleeptime:
+ type: int
+ default: 5
+ fileblub:
+ type: File
+ default:
+ class: File
+ location: keep:d7514270f356df848477718d58308cc4+94/a
+ secondaryFiles:
+ - class: File
+ location: keep:d7514270f356df848477718d58308cc4+94/b
+outputs:
+ out:
+ type: string
+ outputSource: substep/out
+requirements:
+ SubworkflowFeatureRequirement: {}
+ ScatterFeatureRequirement: {}
+ InlineJavascriptRequirement: {}
+ StepInputExpressionRequirement: {}
+steps:
+ substep:
+ in:
+ sleeptime: sleeptime
+ fileblub: fileblub
+ out: [out]
+ hints:
+ - class: arv:SeparateRunner
+ runnerProcessName: $("sleeptime "+inputs.sleeptime)
+ - class: DockerRequirement
+ dockerPull: arvados/jobs:2.2.2
+ run:
+ class: Workflow
+ id: mysub
+ inputs:
+ fileblub: File
+ sleeptime: int
+ outputs:
+ out:
+ type: string
+ outputSource: sleep1/out
+ steps:
+ sleep1:
+ in:
+ fileblub: fileblub
+ out: [out]
+ run:
+ class: CommandLineTool
+ id: subtool
+ inputs:
+ fileblub:
+ type: File
+ inputBinding: {position: 1}
+ outputs:
+ out:
+ type: string
+ outputBinding:
+ outputEval: 'out'
+ baseCommand: cat
diff --git a/sdk/cwl/tests/wf/secret_wf.cwl b/sdk/cwl/tests/wf/secret_wf.cwl
index 5d2e231ec8..19d4262eb8 100644
--- a/sdk/cwl/tests/wf/secret_wf.cwl
+++ b/sdk/cwl/tests/wf/secret_wf.cwl
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: Apache-2.0
-cwlVersion: v1.0
+cwlVersion: v1.2
class: Workflow
$namespaces:
cwltool: http://commonwl.org/cwltool#
diff --git a/sdk/cwl/tests/wf/submit_wf.cwl b/sdk/cwl/tests/wf/submit_wf.cwl
index 6856e54748..405f99bcb0 100644
--- a/sdk/cwl/tests/wf/submit_wf.cwl
+++ b/sdk/cwl/tests/wf/submit_wf.cwl
@@ -8,7 +8,7 @@
# (e.g. submit_tool.cwl) and uploading to Keep works as intended.
class: Workflow
-cwlVersion: v1.0
+cwlVersion: v1.2
inputs:
- id: x
type: File
diff --git a/sdk/cwl/tests/wf/submit_wf_map.cwl b/sdk/cwl/tests/wf/submit_wf_map.cwl
new file mode 100644
index 0000000000..e8bb9cf77c
--- /dev/null
+++ b/sdk/cwl/tests/wf/submit_wf_map.cwl
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a workflow file for dependencies
+# (e.g. submit_tool.cwl) and uploading to Keep works as intended.
+
+class: Workflow
+cwlVersion: v1.2
+inputs:
+ x:
+ type: File
+ y:
+ type: Directory
+ z:
+ type: Directory
+outputs: []
+steps:
+ step1:
+ in:
+ x: x
+ out: []
+ run: ../tool/submit_tool_map.cwl
diff --git a/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl b/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl
index 636b850b7d..d3ebbb9787 100644
--- a/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl
+++ b/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl
@@ -5,7 +5,7 @@
# Test case for arvados-cwl-runner. Disables job/container reuse.
class: Workflow
-cwlVersion: v1.0
+cwlVersion: v1.2
$namespaces:
arv: "http://arvados.org/cwl#"
cwltool: "http://commonwl.org/cwltool#"
@@ -24,5 +24,5 @@ steps:
out: []
run: ../tool/submit_tool.cwl
hints:
- arv:ReuseRequirement:
+ WorkReuse:
enableReuse: false
diff --git a/sdk/cwl/tests/wf/submit_wf_process_properties.cwl b/sdk/cwl/tests/wf/submit_wf_process_properties.cwl
index 0d669272f4..ebea61aea9 100644
--- a/sdk/cwl/tests/wf/submit_wf_process_properties.cwl
+++ b/sdk/cwl/tests/wf/submit_wf_process_properties.cwl
@@ -11,7 +11,7 @@ $namespaces:
arv: "http://arvados.org/cwl#"
class: Workflow
-cwlVersion: v1.0
+cwlVersion: v1.2
hints:
arv:ProcessProperties:
diff --git a/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl b/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl
index 814cd07ab5..6fa841d450 100644
--- a/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl
+++ b/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl
@@ -8,7 +8,7 @@
# (e.g. submit_tool.cwl) and uploading to Keep works as intended.
class: Workflow
-cwlVersion: v1.0
+cwlVersion: v1.2
$namespaces:
arv: "http://arvados.org/cwl#"
hints:
diff --git a/sdk/cwl/tests/wf/submit_wf_wrapper.cwl b/sdk/cwl/tests/wf/submit_wf_wrapper.cwl
new file mode 100644
index 0000000000..3e015d65df
--- /dev/null
+++ b/sdk/cwl/tests/wf/submit_wf_wrapper.cwl
@@ -0,0 +1,61 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "acrContainerImage": "999999999999999999999999999999d3+99",
+ "class": "http://arvados.org/cwl#WorkflowRunnerResources"
+ }
+ ],
+ "id": "#main",
+ "inputs": [
+ {
+ "id": "#main/x",
+ "type": "File"
+ },
+ {
+ "id": "#main/y",
+ "type": "Directory"
+ },
+ {
+ "id": "#main/z",
+ "type": "Directory"
+ }
+ ],
+ "outputs": [],
+ "requirements": [
+ {
+ "class": "SubworkflowFeatureRequirement"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/submit_wf.cwl",
+ "in": [
+ {
+ "id": "#main/step/x",
+ "source": "#main/x"
+ },
+ {
+ "id": "#main/step/y",
+ "source": "#main/y"
+ },
+ {
+ "id": "#main/step/z",
+ "source": "#main/z"
+ }
+ ],
+ "label": "submit_wf.cwl",
+ "out": [],
+ "run": "keep:5b4cf4181c65ad292ccba5b142d758a4+274/wf/submit_wf.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
diff --git a/sdk/dev-jobs.dockerfile b/sdk/dev-jobs.dockerfile
index b55b056b2d..f66f670d81 100644
--- a/sdk/dev-jobs.dockerfile
+++ b/sdk/dev-jobs.dockerfile
@@ -9,38 +9,23 @@
# version.
#
# Use arvados/build/build-dev-docker-jobs-image.sh to build.
-#
-# (This dockerfile file must be located in the arvados/sdk/ directory because
-# of the docker build root.)
-FROM debian:buster-slim
+FROM debian:bullseye-slim
MAINTAINER Arvados Package Maintainers
-ENV DEBIAN_FRONTEND noninteractive
-
-ARG pythoncmd=python3
-ARG pipcmd=pip3
-
-RUN apt-get update -q && apt-get install -qy --no-install-recommends \
- git ${pythoncmd}-pip ${pythoncmd}-virtualenv ${pythoncmd}-dev libcurl4-gnutls-dev \
- libgnutls28-dev nodejs ${pythoncmd}-pyasn1-modules build-essential ${pythoncmd}-setuptools
-
-ARG sdk
-ARG runner
-ARG salad
-ARG cwltool
+RUN DEBIAN_FRONTEND=noninteractive apt-get update -q && apt-get install -qy --no-install-recommends \
+ git python3-dev python3-venv libcurl4-gnutls-dev libgnutls28-dev nodejs build-essential
-ADD python/dist/$sdk /tmp/
-ADD cwl/salad_dist/$salad /tmp/
-ADD cwl/cwltool_dist/$cwltool /tmp/
-ADD cwl/dist/$runner /tmp/
+RUN python3 -m venv /opt/arvados-py
+ENV PATH=/opt/arvados-py/bin:/usr/local/bin:/usr/bin:/bin
+RUN python3 -m pip install --no-cache-dir setuptools wheel
-RUN cd /tmp/arvados-python-client-* && $pipcmd install .
-RUN if test -d /tmp/schema-salad-* ; then cd /tmp/schema-salad-* && $pipcmd install . ; fi
-RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && $pipcmd install . ; fi
-RUN cd /tmp/arvados-cwl-runner-* && $pipcmd install .
+# The build script sets up our build context with all the Python source
+# packages to install.
+COPY . /usr/local/src/
+# Run a-c-r afterward to check for a successful install.
+RUN python3 -m pip install --no-cache-dir /usr/local/src/* && arvados-cwl-runner --version && crunchstat-summary --version
-# Install dependencies and set up system.
RUN /usr/sbin/adduser --disabled-password \
--gecos 'Crunch execution user' crunch && \
/usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
diff --git a/sdk/go/arvados/api.go b/sdk/go/arvados/api.go
index 3797a17f50..c3d0ea8aef 100644
--- a/sdk/go/arvados/api.go
+++ b/sdk/go/arvados/api.go
@@ -23,75 +23,91 @@ type APIEndpoint struct {
}
var (
- EndpointConfigGet = APIEndpoint{"GET", "arvados/v1/config", ""}
- EndpointVocabularyGet = APIEndpoint{"GET", "arvados/v1/vocabulary", ""}
- EndpointLogin = APIEndpoint{"GET", "login", ""}
- EndpointLogout = APIEndpoint{"GET", "logout", ""}
- EndpointCollectionCreate = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
- EndpointCollectionUpdate = APIEndpoint{"PATCH", "arvados/v1/collections/{uuid}", "collection"}
- EndpointCollectionGet = APIEndpoint{"GET", "arvados/v1/collections/{uuid}", ""}
- EndpointCollectionList = APIEndpoint{"GET", "arvados/v1/collections", ""}
- EndpointCollectionProvenance = APIEndpoint{"GET", "arvados/v1/collections/{uuid}/provenance", ""}
- EndpointCollectionUsedBy = APIEndpoint{"GET", "arvados/v1/collections/{uuid}/used_by", ""}
- EndpointCollectionDelete = APIEndpoint{"DELETE", "arvados/v1/collections/{uuid}", ""}
- EndpointCollectionTrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/trash", ""}
- EndpointCollectionUntrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/untrash", ""}
- EndpointSpecimenCreate = APIEndpoint{"POST", "arvados/v1/specimens", "specimen"}
- EndpointSpecimenUpdate = APIEndpoint{"PATCH", "arvados/v1/specimens/{uuid}", "specimen"}
- EndpointSpecimenGet = APIEndpoint{"GET", "arvados/v1/specimens/{uuid}", ""}
- EndpointSpecimenList = APIEndpoint{"GET", "arvados/v1/specimens", ""}
- EndpointSpecimenDelete = APIEndpoint{"DELETE", "arvados/v1/specimens/{uuid}", ""}
- EndpointContainerCreate = APIEndpoint{"POST", "arvados/v1/containers", "container"}
- EndpointContainerUpdate = APIEndpoint{"PATCH", "arvados/v1/containers/{uuid}", "container"}
- EndpointContainerGet = APIEndpoint{"GET", "arvados/v1/containers/{uuid}", ""}
- EndpointContainerList = APIEndpoint{"GET", "arvados/v1/containers", ""}
- EndpointContainerDelete = APIEndpoint{"DELETE", "arvados/v1/containers/{uuid}", ""}
- EndpointContainerLock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/lock", ""}
- EndpointContainerUnlock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/unlock", ""}
- EndpointContainerSSH = APIEndpoint{"POST", "arvados/v1/connect/{uuid}/ssh", ""} // move to /containers after #17014 fixes routing
- EndpointContainerGatewayTunnel = APIEndpoint{"POST", "arvados/v1/connect/{uuid}/gateway_tunnel", ""} // move to /containers after #17014 fixes routing
- EndpointContainerRequestCreate = APIEndpoint{"POST", "arvados/v1/container_requests", "container_request"}
- EndpointContainerRequestUpdate = APIEndpoint{"PATCH", "arvados/v1/container_requests/{uuid}", "container_request"}
- EndpointContainerRequestGet = APIEndpoint{"GET", "arvados/v1/container_requests/{uuid}", ""}
- EndpointContainerRequestList = APIEndpoint{"GET", "arvados/v1/container_requests", ""}
- EndpointContainerRequestDelete = APIEndpoint{"DELETE", "arvados/v1/container_requests/{uuid}", ""}
- EndpointGroupCreate = APIEndpoint{"POST", "arvados/v1/groups", "group"}
- EndpointGroupUpdate = APIEndpoint{"PATCH", "arvados/v1/groups/{uuid}", "group"}
- EndpointGroupGet = APIEndpoint{"GET", "arvados/v1/groups/{uuid}", ""}
- EndpointGroupList = APIEndpoint{"GET", "arvados/v1/groups", ""}
- EndpointGroupContents = APIEndpoint{"GET", "arvados/v1/groups/contents", ""}
- EndpointGroupContentsUUIDInPath = APIEndpoint{"GET", "arvados/v1/groups/{uuid}/contents", ""} // Alternative HTTP route; client-side code should always use EndpointGroupContents instead
- EndpointGroupShared = APIEndpoint{"GET", "arvados/v1/groups/shared", ""}
- EndpointGroupDelete = APIEndpoint{"DELETE", "arvados/v1/groups/{uuid}", ""}
- EndpointGroupTrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/trash", ""}
- EndpointGroupUntrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/untrash", ""}
- EndpointLinkCreate = APIEndpoint{"POST", "arvados/v1/links", "link"}
- EndpointLinkUpdate = APIEndpoint{"PATCH", "arvados/v1/links/{uuid}", "link"}
- EndpointLinkGet = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
- EndpointLinkList = APIEndpoint{"GET", "arvados/v1/links", ""}
- EndpointLinkDelete = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
- EndpointSysTrashSweep = APIEndpoint{"POST", "sys/trash_sweep", ""}
- EndpointUserActivate = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
- EndpointUserCreate = APIEndpoint{"POST", "arvados/v1/users", "user"}
- EndpointUserCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
- EndpointUserDelete = APIEndpoint{"DELETE", "arvados/v1/users/{uuid}", ""}
- EndpointUserGet = APIEndpoint{"GET", "arvados/v1/users/{uuid}", ""}
- EndpointUserGetCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
- EndpointUserGetSystem = APIEndpoint{"GET", "arvados/v1/users/system", ""}
- EndpointUserList = APIEndpoint{"GET", "arvados/v1/users", ""}
- EndpointUserMerge = APIEndpoint{"POST", "arvados/v1/users/merge", ""}
- EndpointUserSetup = APIEndpoint{"POST", "arvados/v1/users/setup", "user"}
- EndpointUserSystem = APIEndpoint{"GET", "arvados/v1/users/system", ""}
- EndpointUserUnsetup = APIEndpoint{"POST", "arvados/v1/users/{uuid}/unsetup", ""}
- EndpointUserUpdate = APIEndpoint{"PATCH", "arvados/v1/users/{uuid}", "user"}
- EndpointUserBatchUpdate = APIEndpoint{"PATCH", "arvados/v1/users/batch_update", ""}
- EndpointUserAuthenticate = APIEndpoint{"POST", "arvados/v1/users/authenticate", ""}
- EndpointAPIClientAuthorizationCurrent = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/current", ""}
- EndpointAPIClientAuthorizationCreate = APIEndpoint{"POST", "arvados/v1/api_client_authorizations", "api_client_authorization"}
- EndpointAPIClientAuthorizationUpdate = APIEndpoint{"PUT", "arvados/v1/api_client_authorizations/{uuid}", "api_client_authorization"}
- EndpointAPIClientAuthorizationList = APIEndpoint{"GET", "arvados/v1/api_client_authorizations", ""}
- EndpointAPIClientAuthorizationDelete = APIEndpoint{"DELETE", "arvados/v1/api_client_authorizations/{uuid}", ""}
- EndpointAPIClientAuthorizationGet = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/{uuid}", ""}
+ EndpointConfigGet = APIEndpoint{"GET", "arvados/v1/config", ""}
+ EndpointVocabularyGet = APIEndpoint{"GET", "arvados/v1/vocabulary", ""}
+ EndpointDiscoveryDocument = APIEndpoint{"GET", "discovery/v1/apis/arvados/v1/rest", ""}
+ EndpointLogin = APIEndpoint{"GET", "login", ""}
+ EndpointLogout = APIEndpoint{"GET", "logout", ""}
+ EndpointAuthorizedKeyCreate = APIEndpoint{"POST", "arvados/v1/authorized_keys", "authorized_key"}
+ EndpointAuthorizedKeyUpdate = APIEndpoint{"PATCH", "arvados/v1/authorized_keys/{uuid}", "authorized_key"}
+ EndpointAuthorizedKeyGet = APIEndpoint{"GET", "arvados/v1/authorized_keys/{uuid}", ""}
+ EndpointAuthorizedKeyList = APIEndpoint{"GET", "arvados/v1/authorized_keys", ""}
+ EndpointAuthorizedKeyDelete = APIEndpoint{"DELETE", "arvados/v1/authorized_keys/{uuid}", ""}
+ EndpointCollectionCreate = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
+ EndpointCollectionUpdate = APIEndpoint{"PATCH", "arvados/v1/collections/{uuid}", "collection"}
+ EndpointCollectionGet = APIEndpoint{"GET", "arvados/v1/collections/{uuid}", ""}
+ EndpointCollectionList = APIEndpoint{"GET", "arvados/v1/collections", ""}
+ EndpointCollectionProvenance = APIEndpoint{"GET", "arvados/v1/collections/{uuid}/provenance", ""}
+ EndpointCollectionUsedBy = APIEndpoint{"GET", "arvados/v1/collections/{uuid}/used_by", ""}
+ EndpointCollectionDelete = APIEndpoint{"DELETE", "arvados/v1/collections/{uuid}", ""}
+ EndpointCollectionTrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/trash", ""}
+ EndpointCollectionUntrash = APIEndpoint{"POST", "arvados/v1/collections/{uuid}/untrash", ""}
+ EndpointSpecimenCreate = APIEndpoint{"POST", "arvados/v1/specimens", "specimen"}
+ EndpointSpecimenUpdate = APIEndpoint{"PATCH", "arvados/v1/specimens/{uuid}", "specimen"}
+ EndpointSpecimenGet = APIEndpoint{"GET", "arvados/v1/specimens/{uuid}", ""}
+ EndpointSpecimenList = APIEndpoint{"GET", "arvados/v1/specimens", ""}
+ EndpointSpecimenDelete = APIEndpoint{"DELETE", "arvados/v1/specimens/{uuid}", ""}
+ EndpointContainerCreate = APIEndpoint{"POST", "arvados/v1/containers", "container"}
+ EndpointContainerUpdate = APIEndpoint{"PATCH", "arvados/v1/containers/{uuid}", "container"}
+ EndpointContainerPriorityUpdate = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/update_priority", "container"}
+ EndpointContainerGet = APIEndpoint{"GET", "arvados/v1/containers/{uuid}", ""}
+ EndpointContainerList = APIEndpoint{"GET", "arvados/v1/containers", ""}
+ EndpointContainerDelete = APIEndpoint{"DELETE", "arvados/v1/containers/{uuid}", ""}
+ EndpointContainerLock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/lock", ""}
+ EndpointContainerUnlock = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/unlock", ""}
+ EndpointContainerSSH = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/ssh", ""}
+ EndpointContainerSSHCompat = APIEndpoint{"POST", "arvados/v1/connect/{uuid}/ssh", ""} // for compatibility with arvados <2.7
+ EndpointContainerGatewayTunnel = APIEndpoint{"POST", "arvados/v1/containers/{uuid}/gateway_tunnel", ""}
+ EndpointContainerGatewayTunnelCompat = APIEndpoint{"POST", "arvados/v1/connect/{uuid}/gateway_tunnel", ""} // for compatibility with arvados <2.7
+ EndpointContainerRequestCreate = APIEndpoint{"POST", "arvados/v1/container_requests", "container_request"}
+ EndpointContainerRequestUpdate = APIEndpoint{"PATCH", "arvados/v1/container_requests/{uuid}", "container_request"}
+ EndpointContainerRequestGet = APIEndpoint{"GET", "arvados/v1/container_requests/{uuid}", ""}
+ EndpointContainerRequestList = APIEndpoint{"GET", "arvados/v1/container_requests", ""}
+ EndpointContainerRequestDelete = APIEndpoint{"DELETE", "arvados/v1/container_requests/{uuid}", ""}
+ EndpointContainerRequestContainerStatus = APIEndpoint{"GET", "arvados/v1/container_requests/{uuid}/container_status", ""}
+ EndpointContainerRequestLog = APIEndpoint{"GET", "arvados/v1/container_requests/{uuid}/log{path:|/.*}", ""}
+ EndpointGroupCreate = APIEndpoint{"POST", "arvados/v1/groups", "group"}
+ EndpointGroupUpdate = APIEndpoint{"PATCH", "arvados/v1/groups/{uuid}", "group"}
+ EndpointGroupGet = APIEndpoint{"GET", "arvados/v1/groups/{uuid}", ""}
+ EndpointGroupList = APIEndpoint{"GET", "arvados/v1/groups", ""}
+ EndpointGroupContents = APIEndpoint{"GET", "arvados/v1/groups/contents", ""}
+ EndpointGroupContentsUUIDInPath = APIEndpoint{"GET", "arvados/v1/groups/{uuid}/contents", ""} // Alternative HTTP route; client-side code should always use EndpointGroupContents instead
+ EndpointGroupShared = APIEndpoint{"GET", "arvados/v1/groups/shared", ""}
+ EndpointGroupDelete = APIEndpoint{"DELETE", "arvados/v1/groups/{uuid}", ""}
+ EndpointGroupTrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/trash", ""}
+ EndpointGroupUntrash = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/untrash", ""}
+ EndpointLinkCreate = APIEndpoint{"POST", "arvados/v1/links", "link"}
+ EndpointLinkUpdate = APIEndpoint{"PATCH", "arvados/v1/links/{uuid}", "link"}
+ EndpointLinkGet = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
+ EndpointLinkList = APIEndpoint{"GET", "arvados/v1/links", ""}
+ EndpointLinkDelete = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
+ EndpointLogCreate = APIEndpoint{"POST", "arvados/v1/logs", "log"}
+ EndpointLogUpdate = APIEndpoint{"PATCH", "arvados/v1/logs/{uuid}", "log"}
+ EndpointLogGet = APIEndpoint{"GET", "arvados/v1/logs/{uuid}", ""}
+ EndpointLogList = APIEndpoint{"GET", "arvados/v1/logs", ""}
+ EndpointLogDelete = APIEndpoint{"DELETE", "arvados/v1/logs/{uuid}", ""}
+ EndpointSysTrashSweep = APIEndpoint{"POST", "sys/trash_sweep", ""}
+ EndpointUserActivate = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
+ EndpointUserCreate = APIEndpoint{"POST", "arvados/v1/users", "user"}
+ EndpointUserCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
+ EndpointUserDelete = APIEndpoint{"DELETE", "arvados/v1/users/{uuid}", ""}
+ EndpointUserGet = APIEndpoint{"GET", "arvados/v1/users/{uuid}", ""}
+ EndpointUserGetCurrent = APIEndpoint{"GET", "arvados/v1/users/current", ""}
+ EndpointUserGetSystem = APIEndpoint{"GET", "arvados/v1/users/system", ""}
+ EndpointUserList = APIEndpoint{"GET", "arvados/v1/users", ""}
+ EndpointUserMerge = APIEndpoint{"POST", "arvados/v1/users/merge", ""}
+ EndpointUserSetup = APIEndpoint{"POST", "arvados/v1/users/setup", "user"}
+ EndpointUserSystem = APIEndpoint{"GET", "arvados/v1/users/system", ""}
+ EndpointUserUnsetup = APIEndpoint{"POST", "arvados/v1/users/{uuid}/unsetup", ""}
+ EndpointUserUpdate = APIEndpoint{"PATCH", "arvados/v1/users/{uuid}", "user"}
+ EndpointUserBatchUpdate = APIEndpoint{"PATCH", "arvados/v1/users/batch_update", ""}
+ EndpointUserAuthenticate = APIEndpoint{"POST", "arvados/v1/users/authenticate", ""}
+ EndpointAPIClientAuthorizationCurrent = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/current", ""}
+ EndpointAPIClientAuthorizationCreate = APIEndpoint{"POST", "arvados/v1/api_client_authorizations", "api_client_authorization"}
+ EndpointAPIClientAuthorizationUpdate = APIEndpoint{"PUT", "arvados/v1/api_client_authorizations/{uuid}", "api_client_authorization"}
+ EndpointAPIClientAuthorizationList = APIEndpoint{"GET", "arvados/v1/api_client_authorizations", ""}
+ EndpointAPIClientAuthorizationDelete = APIEndpoint{"DELETE", "arvados/v1/api_client_authorizations/{uuid}", ""}
+ EndpointAPIClientAuthorizationGet = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/{uuid}", ""}
)
type ContainerSSHOptions struct {
@@ -226,11 +242,17 @@ type LogoutOptions struct {
ReturnTo string `json:"return_to"` // Redirect to this URL after logging out
}
+type BlockReadOptions struct {
+ Locator string
+ WriteTo io.Writer
+ LocalLocator func(string)
+}
+
type BlockWriteOptions struct {
Hash string
Data []byte
- Reader io.Reader
- DataSize int // Must be set if Data is nil.
+ Reader io.Reader // Must be set if Data is nil.
+ DataSize int // Must be set if Data is nil.
RequestID string
StorageClasses []string
Replicas int
@@ -238,8 +260,21 @@ type BlockWriteOptions struct {
}
type BlockWriteResponse struct {
- Locator string
- Replicas int
+ Locator string
+ Replicas int
+ StorageClasses map[string]int
+}
+
+type WebDAVOptions struct {
+ Method string
+ Path string
+ Header http.Header
+}
+
+type ContainerLogOptions struct {
+ UUID string `json:"uuid"`
+ NoForward bool `json:"no_forward"`
+ WebDAVOptions
}
type API interface {
@@ -247,6 +282,11 @@ type API interface {
VocabularyGet(ctx context.Context) (Vocabulary, error)
Login(ctx context.Context, options LoginOptions) (LoginResponse, error)
Logout(ctx context.Context, options LogoutOptions) (LogoutResponse, error)
+ AuthorizedKeyCreate(ctx context.Context, options CreateOptions) (AuthorizedKey, error)
+ AuthorizedKeyUpdate(ctx context.Context, options UpdateOptions) (AuthorizedKey, error)
+ AuthorizedKeyGet(ctx context.Context, options GetOptions) (AuthorizedKey, error)
+ AuthorizedKeyList(ctx context.Context, options ListOptions) (AuthorizedKeyList, error)
+ AuthorizedKeyDelete(ctx context.Context, options DeleteOptions) (AuthorizedKey, error)
CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
CollectionUpdate(ctx context.Context, options UpdateOptions) (Collection, error)
CollectionGet(ctx context.Context, options GetOptions) (Collection, error)
@@ -258,6 +298,7 @@ type API interface {
CollectionUntrash(ctx context.Context, options UntrashOptions) (Collection, error)
ContainerCreate(ctx context.Context, options CreateOptions) (Container, error)
ContainerUpdate(ctx context.Context, options UpdateOptions) (Container, error)
+ ContainerPriorityUpdate(ctx context.Context, options UpdateOptions) (Container, error)
ContainerGet(ctx context.Context, options GetOptions) (Container, error)
ContainerList(ctx context.Context, options ListOptions) (ContainerList, error)
ContainerDelete(ctx context.Context, options DeleteOptions) (Container, error)
@@ -270,6 +311,8 @@ type API interface {
ContainerRequestGet(ctx context.Context, options GetOptions) (ContainerRequest, error)
ContainerRequestList(ctx context.Context, options ListOptions) (ContainerRequestList, error)
ContainerRequestDelete(ctx context.Context, options DeleteOptions) (ContainerRequest, error)
+ ContainerRequestContainerStatus(ctx context.Context, options GetOptions) (ContainerStatus, error)
+ ContainerRequestLog(ctx context.Context, options ContainerLogOptions) (http.Handler, error)
GroupCreate(ctx context.Context, options CreateOptions) (Group, error)
GroupUpdate(ctx context.Context, options UpdateOptions) (Group, error)
GroupGet(ctx context.Context, options GetOptions) (Group, error)
@@ -284,6 +327,11 @@ type API interface {
LinkGet(ctx context.Context, options GetOptions) (Link, error)
LinkList(ctx context.Context, options ListOptions) (LinkList, error)
LinkDelete(ctx context.Context, options DeleteOptions) (Link, error)
+ LogCreate(ctx context.Context, options CreateOptions) (Log, error)
+ LogUpdate(ctx context.Context, options UpdateOptions) (Log, error)
+ LogGet(ctx context.Context, options GetOptions) (Log, error)
+ LogList(ctx context.Context, options ListOptions) (LogList, error)
+ LogDelete(ctx context.Context, options DeleteOptions) (Log, error)
SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
@@ -309,4 +357,5 @@ type API interface {
APIClientAuthorizationDelete(ctx context.Context, options DeleteOptions) (APIClientAuthorization, error)
APIClientAuthorizationUpdate(ctx context.Context, options UpdateOptions) (APIClientAuthorization, error)
APIClientAuthorizationGet(ctx context.Context, options GetOptions) (APIClientAuthorization, error)
+ DiscoveryDocument(ctx context.Context) (DiscoveryDocument, error)
}
diff --git a/sdk/go/arvados/authorized_key.go b/sdk/go/arvados/authorized_key.go
new file mode 100644
index 0000000000..642fc11261
--- /dev/null
+++ b/sdk/go/arvados/authorized_key.go
@@ -0,0 +1,31 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// AuthorizedKey is an arvados#authorizedKey resource.
+type AuthorizedKey struct {
+ UUID string `json:"uuid"`
+ Etag string `json:"etag"`
+ OwnerUUID string `json:"owner_uuid"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
+ ModifiedByClientUUID string `json:"modified_by_client_uuid"`
+ ModifiedByUserUUID string `json:"modified_by_user_uuid"`
+ Name string `json:"name"`
+ AuthorizedUserUUID string `json:"authorized_user_uuid"`
+ PublicKey string `json:"public_key"`
+ KeyType string `json:"key_type"`
+ ExpiresAt time.Time `json:"expires_at"`
+}
+
+// AuthorizedKeyList is an arvados#authorizedKeyList resource.
+type AuthorizedKeyList struct {
+ Items []AuthorizedKey `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Offset int `json:"offset"`
+ Limit int `json:"limit"`
+}
diff --git a/sdk/go/arvados/byte_size.go b/sdk/go/arvados/byte_size.go
index 08cc83e126..7cc2c69781 100644
--- a/sdk/go/arvados/byte_size.go
+++ b/sdk/go/arvados/byte_size.go
@@ -8,11 +8,16 @@ import (
"encoding/json"
"fmt"
"math"
+ "strconv"
"strings"
)
type ByteSize int64
+// ByteSizeOrPercent indicates either a number of bytes or a
+// percentage from 1 to 100.
+type ByteSizeOrPercent ByteSize
+
var prefixValue = map[string]int64{
"": 1,
"K": 1000,
@@ -89,3 +94,54 @@ func (n *ByteSize) UnmarshalJSON(data []byte) error {
return fmt.Errorf("bug: json.Number for %q is not int64 or float64: %s", s, err)
}
}
+
+func (n ByteSizeOrPercent) MarshalJSON() ([]byte, error) {
+ if n < 0 && n >= -100 {
+ return []byte(fmt.Sprintf("\"%d%%\"", -n)), nil
+ } else {
+ return json.Marshal(int64(n))
+ }
+}
+
+func (n *ByteSizeOrPercent) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 || data[0] != '"' {
+ return (*ByteSize)(n).UnmarshalJSON(data)
+ }
+ var s string
+ err := json.Unmarshal(data, &s)
+ if err != nil {
+ return err
+ }
+ if s := strings.TrimSpace(s); len(s) > 0 && s[len(s)-1] == '%' {
+ pct, err := strconv.ParseInt(strings.TrimSpace(s[:len(s)-1]), 10, 64)
+ if err != nil {
+ return err
+ }
+ if pct < 0 || pct > 100 {
+ return fmt.Errorf("invalid value %q (percentage must be between 0 and 100)", s)
+ }
+ *n = ByteSizeOrPercent(-pct)
+ return nil
+ }
+ return (*ByteSize)(n).UnmarshalJSON(data)
+}
+
+// ByteSize returns the absolute byte size specified by n, or 0 if n
+// specifies a percent.
+func (n ByteSizeOrPercent) ByteSize() ByteSize {
+ if n >= -100 && n < 0 {
+ return 0
+ } else {
+ return ByteSize(n)
+ }
+}
+
+// Percent returns the percentage specified by n, or 0 if n specifies
+// an absolute byte size.
+func (n ByteSizeOrPercent) Percent() int64 {
+ if n >= -100 && n < 0 {
+ return int64(-n)
+ } else {
+ return 0
+ }
+}
diff --git a/sdk/go/arvados/byte_size_test.go b/sdk/go/arvados/byte_size_test.go
index 7c4aff2072..e5fb10ebdb 100644
--- a/sdk/go/arvados/byte_size_test.go
+++ b/sdk/go/arvados/byte_size_test.go
@@ -64,7 +64,54 @@ func (s *ByteSizeSuite) TestUnmarshal(c *check.C) {
} {
var n ByteSize
err := yaml.Unmarshal([]byte(testcase+"\n"), &n)
- c.Logf("%v => error: %v", n, err)
+ c.Logf("%s => error: %v", testcase, err)
+ c.Check(err, check.NotNil)
+ }
+}
+
+func (s *ByteSizeSuite) TestMarshalByteSizeOrPercent(c *check.C) {
+ for _, testcase := range []struct {
+ in ByteSizeOrPercent
+ out string
+ }{
+ {0, "0"},
+ {-1, "1%"},
+ {-100, "100%"},
+ {8, "8"},
+ } {
+ out, err := yaml.Marshal(&testcase.in)
+ c.Check(err, check.IsNil)
+ c.Check(string(out), check.Equals, testcase.out+"\n")
+ }
+}
+
+func (s *ByteSizeSuite) TestUnmarshalByteSizeOrPercent(c *check.C) {
+ for _, testcase := range []struct {
+ in string
+ out int64
+ }{
+ {"0", 0},
+ {"100", 100},
+ {"0%", 0},
+ {"1%", -1},
+ {"100%", -100},
+ {"8 GB", 8000000000},
+ } {
+ var n ByteSizeOrPercent
+ err := yaml.Unmarshal([]byte(testcase.in+"\n"), &n)
+ c.Logf("%v => %v: %v", testcase.in, testcase.out, n)
+ c.Check(err, check.IsNil)
+ c.Check(int64(n), check.Equals, testcase.out)
+ }
+ for _, testcase := range []string{
+ "1000%", "101%", "-1%",
+ "%", "-%", "%%", "%1",
+ "400000 EB",
+ "4.11e4 EB",
+ } {
+ var n ByteSizeOrPercent
+ err := yaml.Unmarshal([]byte(testcase+"\n"), &n)
+ c.Logf("%s => error: %v", testcase, err)
c.Check(err, check.NotNil)
}
}
diff --git a/sdk/go/arvados/client.go b/sdk/go/arvados/client.go
index 24d5ac3e33..7bc3d5bc42 100644
--- a/sdk/go/arvados/client.go
+++ b/sdk/go/arvados/client.go
@@ -7,6 +7,7 @@ package arvados
import (
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"encoding/json"
"errors"
@@ -15,14 +16,22 @@ import (
"io/fs"
"io/ioutil"
"log"
+ "math"
+ "math/big"
+ mathrand "math/rand"
+ "net"
"net/http"
"net/url"
"os"
"regexp"
+ "strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/hashicorp/go-retryablehttp"
)
// A Client is an HTTP client with an API endpoint and a set of
@@ -61,11 +70,18 @@ type Client struct {
// Timeout for requests. NewClientFromConfig and
// NewClientFromEnv return a Client with a default 5 minute
- // timeout. To disable this timeout and rely on each
- // http.Request's context deadline instead, set Timeout to
- // zero.
+ // timeout. Within this time, retryable errors are
+ // automatically retried with exponential backoff.
+ //
+ // To disable automatic retries, set Timeout to zero and use a
+ // context deadline to establish a maximum request time.
Timeout time.Duration
+ // Maximum disk cache size in bytes or percent of total
+ // filesystem size. If zero, use default, currently 10% of
+ // filesystem size.
+ DiskCacheSize ByteSizeOrPercent
+
dd *DiscoveryDocument
defaultRequestID string
@@ -73,6 +89,17 @@ type Client struct {
// APIHost and AuthToken were loaded from ARVADOS_* env vars
// (used to customize "no host/token" error messages)
loadedFromEnv bool
+
+ // Track/limit concurrent outgoing API calls. Note this
+ // differs from an outgoing connection limit (a feature
+ // provided by http.Transport) when concurrent calls are
+ // multiplexed on a single http2 connection.
+ //
+ // getRequestLimiter() should always be used, because this can
+ // be nil.
+ requestLimiter *requestLimiter
+
+ last503 atomic.Value
}
// InsecureHTTPClient is the default http.Client used by a Client with
@@ -94,11 +121,46 @@ func NewClientFromConfig(cluster *Cluster) (*Client, error) {
if ctrlURL.Host == "" {
return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
}
+ var hc *http.Client
+ if srvaddr := os.Getenv("ARVADOS_SERVER_ADDRESS"); srvaddr != "" {
+ // When this client is used to make a request to
+ // https://{ctrlhost}:port/ (any port), it dials the
+ // indicated port on ARVADOS_SERVER_ADDRESS instead.
+ //
+ // This is invoked by arvados-server boot to ensure
+ // that server->server traffic (e.g.,
+ // keepproxy->controller) only hits local interfaces,
+ // even if the Controller.ExternalURL host is a load
+ // balancer / gateway and not a local interface
+ // address (e.g., when running on a cloud VM).
+ //
+ // This avoids unnecessary delay/cost of routing
+ // external traffic, and also allows controller to
+ // recognize other services as internal clients based
+ // on the connection source address.
+ divertedHost := (*url.URL)(&cluster.Services.Controller.ExternalURL).Hostname()
+ var dialer net.Dialer
+ hc = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: cluster.TLS.Insecure},
+ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ host, port, err := net.SplitHostPort(addr)
+ if err == nil && network == "tcp" && host == divertedHost {
+ addr = net.JoinHostPort(srvaddr, port)
+ }
+ return dialer.DialContext(ctx, network, addr)
+ },
+ },
+ }
+ }
return &Client{
- Scheme: ctrlURL.Scheme,
- APIHost: ctrlURL.Host,
- Insecure: cluster.TLS.Insecure,
- Timeout: 5 * time.Minute,
+ Client: hc,
+ Scheme: ctrlURL.Scheme,
+ APIHost: ctrlURL.Host,
+ Insecure: cluster.TLS.Insecure,
+ Timeout: 5 * time.Minute,
+ DiskCacheSize: cluster.Collections.WebDAVCache.DiskCacheSize,
+ requestLimiter: &requestLimiter{maxlimit: int64(cluster.API.MaxConcurrentRequests / 4)},
}, nil
}
@@ -117,10 +179,10 @@ func NewClientFromConfig(cluster *Cluster) (*Client, error) {
// Space characters are trimmed when reading the settings file, so
// these are equivalent:
//
-// ARVADOS_API_HOST=localhost\n
-// ARVADOS_API_HOST=localhost\r\n
-// ARVADOS_API_HOST = localhost \n
-// \tARVADOS_API_HOST = localhost\n
+// ARVADOS_API_HOST=localhost\n
+// ARVADOS_API_HOST=localhost\r\n
+// ARVADOS_API_HOST = localhost \n
+// \tARVADOS_API_HOST = localhost\n
func NewClientFromEnv() *Client {
vars := map[string]string{}
home := os.Getenv("HOME")
@@ -184,10 +246,16 @@ func NewClientFromEnv() *Client {
var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
-// Do adds Authorization and X-Request-Id headers and then calls
-// (*http.Client)Do().
+var nopCancelFunc context.CancelFunc = func() {}
+
+var reqErrorRe = regexp.MustCompile(`net/http: invalid header `)
+
+// Do augments (*http.Client)Do(): adds Authorization and X-Request-Id
+// headers, delays in order to comply with rate-limiting restrictions,
+// and retries failed requests when appropriate.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
- if auth, _ := req.Context().Value(contextKeyAuthorization{}).(string); auth != "" {
+ ctx := req.Context()
+ if auth, _ := ctx.Value(contextKeyAuthorization{}).(string); auth != "" {
req.Header.Add("Authorization", auth)
} else if c.AuthToken != "" {
req.Header.Add("Authorization", "OAuth2 "+c.AuthToken)
@@ -195,7 +263,7 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
if req.Header.Get("X-Request-Id") == "" {
var reqid string
- if ctxreqid, _ := req.Context().Value(contextKeyRequestID{}).(string); ctxreqid != "" {
+ if ctxreqid, _ := ctx.Value(contextKeyRequestID{}).(string); ctxreqid != "" {
reqid = ctxreqid
} else if c.defaultRequestID != "" {
reqid = c.defaultRequestID
@@ -208,25 +276,136 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
req.Header.Set("X-Request-Id", reqid)
}
}
- var cancel context.CancelFunc
+
+ rreq, err := retryablehttp.FromRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ cancel := nopCancelFunc
+ var lastResp *http.Response
+ var lastRespBody io.ReadCloser
+ var lastErr error
+ var checkRetryCalled int
+
+ rclient := retryablehttp.NewClient()
+ rclient.HTTPClient = c.httpClient()
+ rclient.Backoff = exponentialBackoff
if c.Timeout > 0 {
- ctx := req.Context()
+ rclient.RetryWaitMax = c.Timeout / 10
+ rclient.RetryMax = 32
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(c.Timeout))
- req = req.WithContext(ctx)
- }
- resp, err := c.httpClient().Do(req)
- if err == nil && cancel != nil {
- // We need to call cancel() eventually, but we can't
- // use "defer cancel()" because the context has to
- // stay alive until the caller has finished reading
- // the response body.
- resp.Body = cancelOnClose{ReadCloser: resp.Body, cancel: cancel}
- } else if cancel != nil {
+ rreq = rreq.WithContext(ctx)
+ } else {
+ rclient.RetryMax = 0
+ }
+ rclient.CheckRetry = func(ctx context.Context, resp *http.Response, respErr error) (bool, error) {
+ checkRetryCalled++
+ if c.getRequestLimiter().Report(resp, respErr) {
+ c.last503.Store(time.Now())
+ }
+ if c.Timeout == 0 {
+ return false, nil
+ }
+ // This check can be removed when
+ // https://github.com/hashicorp/go-retryablehttp/pull/210
+ // (or equivalent) is merged and we update go.mod.
+ // Until then, it is needed to pass
+ // TestNonRetryableStdlibError.
+ if respErr != nil && reqErrorRe.MatchString(respErr.Error()) {
+ return false, nil
+ }
+ retrying, err := retryablehttp.DefaultRetryPolicy(ctx, resp, respErr)
+ if retrying {
+ lastResp, lastRespBody, lastErr = resp, nil, respErr
+ if respErr == nil {
+ // Save the response and body so we
+ // can return it instead of "deadline
+ // exceeded". retryablehttp.Client
+ // will drain and discard resp.body,
+ // so we need to stash it separately.
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err == nil {
+ lastRespBody = io.NopCloser(bytes.NewReader(buf))
+ } else {
+ lastResp, lastErr = nil, err
+ }
+ }
+ }
+ return retrying, err
+ }
+ rclient.Logger = nil
+
+ limiter := c.getRequestLimiter()
+ limiter.Acquire(ctx)
+ if ctx.Err() != nil {
+ limiter.Release()
cancel()
+ return nil, ctx.Err()
+ }
+ resp, err := rclient.Do(rreq)
+ if (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) && (lastResp != nil || lastErr != nil) {
+ resp = lastResp
+ err = lastErr
+ if checkRetryCalled > 0 && err != nil {
+ // Mimic retryablehttp's "giving up after X
+ // attempts" message, even if we gave up
+ // because of time rather than maxretries.
+ err = fmt.Errorf("%s %s giving up after %d attempt(s): %w", req.Method, req.URL.String(), checkRetryCalled, err)
+ }
+ if resp != nil {
+ resp.Body = lastRespBody
+ }
+ }
+ if err != nil {
+ limiter.Release()
+ cancel()
+ return nil, err
+ }
+ // We need to call cancel() eventually, but we can't use
+ // "defer cancel()" because the context has to stay alive
+ // until the caller has finished reading the response body.
+ resp.Body = cancelOnClose{
+ ReadCloser: resp.Body,
+ cancel: func() {
+ limiter.Release()
+ cancel()
+ },
}
return resp, err
}
+// Last503 returns the time of the most recent HTTP 503 (Service
+// Unavailable) response. Zero time indicates never.
+func (c *Client) Last503() time.Time {
+ t, _ := c.last503.Load().(time.Time)
+ return t
+}
+
+// globalRequestLimiter entries (one for each APIHost) don't have a
+// hard limit on outgoing connections, but do add a delay and reduce
+// concurrency after 503 errors.
+var (
+ globalRequestLimiter = map[string]*requestLimiter{}
+ globalRequestLimiterLock sync.Mutex
+)
+
+// Get this client's requestLimiter, or a global requestLimiter
+// singleton for c's APIHost if this client doesn't have its own.
+func (c *Client) getRequestLimiter() *requestLimiter {
+ if c.requestLimiter != nil {
+ return c.requestLimiter
+ }
+ globalRequestLimiterLock.Lock()
+ defer globalRequestLimiterLock.Unlock()
+ limiter := globalRequestLimiter[c.APIHost]
+ if limiter == nil {
+ limiter = &requestLimiter{}
+ globalRequestLimiter[c.APIHost] = limiter
+ }
+ return limiter
+}
+
// cancelOnClose calls a provided CancelFunc when its wrapped
// ReadCloser's Close() method is called.
type cancelOnClose struct {
@@ -249,6 +428,40 @@ func isRedirectStatus(code int) bool {
}
}
+const minExponentialBackoffBase = time.Second
+
+// Implements retryablehttp.Backoff using the server-provided
+// Retry-After header if available, otherwise nearly-full jitter
+// exponential backoff (similar to
+// https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/),
+// in all cases respecting the provided min and max.
+func exponentialBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+ if attemptNum > 0 && min < minExponentialBackoffBase {
+ min = minExponentialBackoffBase
+ }
+ var t time.Duration
+ if resp != nil && (resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable) {
+ if s := resp.Header.Get("Retry-After"); s != "" {
+ if sleep, err := strconv.ParseInt(s, 10, 64); err == nil {
+ t = time.Second * time.Duration(sleep)
+ } else if stamp, err := time.Parse(time.RFC1123, s); err == nil {
+ t = stamp.Sub(time.Now())
+ }
+ }
+ }
+ if t == 0 {
+ jitter := mathrand.New(mathrand.NewSource(int64(time.Now().Nanosecond()))).Float64()
+ t = min + time.Duration((math.Pow(2, float64(attemptNum))*float64(min)-float64(min))*jitter)
+ }
+ if t < min {
+ return min
+ } else if t > max {
+ return max
+ } else {
+ return t
+ }
+}
+
// DoAndDecode performs req and unmarshals the response (which must be
// JSON) into dst. Use this instead of RequestAndDecode if you need
// more control of the http.Request object.
@@ -294,11 +507,11 @@ func (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {
// Convert an arbitrary struct to url.Values. For example,
//
-// Foo{Bar: []int{1,2,3}, Baz: "waz"}
+// Foo{Bar: []int{1,2,3}, Baz: "waz"}
//
// becomes
//
-// url.Values{`bar`:`{"a":[1,2,3]}`,`Baz`:`waz`}
+// url.Values{`bar`:`{"a":[1,2,3]}`,`Baz`:`waz`}
//
// params itself is returned if it is already an url.Values.
func anythingToValues(params interface{}) (url.Values, error) {
@@ -381,6 +594,12 @@ func (c *Client) RequestAndDecodeContext(ctx context.Context, dst interface{}, m
if err != nil {
return err
}
+ if dst == nil {
+ if urlValues == nil {
+ urlValues = url.Values{}
+ }
+ urlValues["select"] = []string{`["uuid"]`}
+ }
if urlValues == nil {
// Nothing to send
} else if body != nil || ((method == "GET" || method == "HEAD") && len(urlValues.Encode()) < 1000) {
@@ -454,7 +673,11 @@ func (c *Client) apiURL(path string) string {
if scheme == "" {
scheme = "https"
}
- return scheme + "://" + c.APIHost + "/" + path
+	// Double-slashes in URLs tend to cause subtle hidden problems
+ // (e.g., they can behave differently when a load balancer is
+ // in the picture). Here we ensure exactly one "/" regardless
+ // of whether the given APIHost or path has a superfluous one.
+ return scheme + "://" + strings.TrimSuffix(c.APIHost, "/") + "/" + strings.TrimPrefix(path, "/")
}
// DiscoveryDocument is the Arvados server's description of itself.
@@ -465,6 +688,7 @@ type DiscoveryDocument struct {
GitURL string `json:"gitUrl"`
Schemas map[string]Schema `json:"schemas"`
Resources map[string]Resource `json:"resources"`
+ Revision string `json:"revision"`
}
type Resource struct {
@@ -565,3 +789,17 @@ func (c *Client) PathForUUID(method, uuid string) (string, error) {
}
return path, nil
}
+
+var maxUUIDInt = (&big.Int{}).Exp(big.NewInt(36), big.NewInt(15), nil)
+
+func RandomUUID(clusterID, infix string) string {
+ n, err := rand.Int(rand.Reader, maxUUIDInt)
+ if err != nil {
+ panic(err)
+ }
+ nstr := n.Text(36)
+ for len(nstr) < 15 {
+ nstr = "0" + nstr
+ }
+ return clusterID + "-" + infix + "-" + nstr
+}
diff --git a/sdk/go/arvados/client_test.go b/sdk/go/arvados/client_test.go
index 2363803cab..55e2f998c4 100644
--- a/sdk/go/arvados/client_test.go
+++ b/sdk/go/arvados/client_test.go
@@ -6,14 +6,19 @@ package arvados
import (
"bytes"
+ "context"
"fmt"
"io/ioutil"
+ "math"
+ "math/rand"
"net/http"
+ "net/http/httptest"
"net/url"
"os"
"strings"
"sync"
"testing/iotest"
+ "time"
check "gopkg.in/check.v1"
)
@@ -165,6 +170,44 @@ func (*clientSuite) TestAnythingToValues(c *check.C) {
}
}
+// select=["uuid"] is added automatically when RequestAndDecode's
+// destination argument is nil.
+func (*clientSuite) TestAutoSelectUUID(c *check.C) {
+ var req *http.Request
+ var err error
+ server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c.Check(r.ParseForm(), check.IsNil)
+ req = r
+ w.Write([]byte("{}"))
+ }))
+ client := Client{
+ APIHost: strings.TrimPrefix(server.URL, "https://"),
+ AuthToken: "zzz",
+ Insecure: true,
+ Timeout: 2 * time.Second,
+ }
+
+ req = nil
+ err = client.RequestAndDecode(nil, http.MethodPost, "test", nil, nil)
+ c.Check(err, check.IsNil)
+ c.Check(req.FormValue("select"), check.Equals, `["uuid"]`)
+
+ req = nil
+ err = client.RequestAndDecode(nil, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.IsNil)
+ c.Check(req.FormValue("select"), check.Equals, `["uuid"]`)
+
+ req = nil
+ err = client.RequestAndDecode(nil, http.MethodGet, "test", nil, map[string]interface{}{"select": []string{"blergh"}})
+ c.Check(err, check.IsNil)
+ c.Check(req.FormValue("select"), check.Equals, `["uuid"]`)
+
+ req = nil
+ err = client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, map[string]interface{}{"select": []string{"blergh"}})
+ c.Check(err, check.IsNil)
+ c.Check(req.FormValue("select"), check.Equals, `["blergh"]`)
+}
+
func (*clientSuite) TestLoadConfig(c *check.C) {
oldenv := os.Environ()
defer func() {
@@ -217,3 +260,198 @@ func (*clientSuite) TestLoadConfig(c *check.C) {
c.Check(client.APIHost, check.Equals, "[::]:3")
c.Check(client.Insecure, check.Equals, false)
}
+
+var _ = check.Suite(&clientRetrySuite{})
+
+type clientRetrySuite struct {
+ server *httptest.Server
+ client Client
+ reqs []*http.Request
+ respStatus chan int
+ respDelay time.Duration
+
+ origLimiterQuietPeriod time.Duration
+}
+
+func (s *clientRetrySuite) SetUpTest(c *check.C) {
+ // Test server: delay and return errors until a final status
+ // appears on the respStatus channel.
+ s.origLimiterQuietPeriod = requestLimiterQuietPeriod
+ requestLimiterQuietPeriod = time.Second / 100
+ s.server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ s.reqs = append(s.reqs, r)
+ delay := s.respDelay
+ if delay == 0 {
+ delay = time.Duration(rand.Int63n(int64(time.Second / 10)))
+ }
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+ select {
+ case code, ok := <-s.respStatus:
+ if !ok {
+ code = http.StatusOK
+ }
+ w.WriteHeader(code)
+ w.Write([]byte(`{}`))
+ case <-timer.C:
+ w.WriteHeader(http.StatusServiceUnavailable)
+ }
+ }))
+ s.reqs = nil
+ s.respStatus = make(chan int, 1)
+ s.client = Client{
+ APIHost: s.server.URL[8:],
+ AuthToken: "zzz",
+ Insecure: true,
+ Timeout: 2 * time.Second,
+ }
+}
+
+func (s *clientRetrySuite) TearDownTest(c *check.C) {
+ s.server.Close()
+ requestLimiterQuietPeriod = s.origLimiterQuietPeriod
+}
+
+func (s *clientRetrySuite) TestOK(c *check.C) {
+ s.respStatus <- http.StatusOK
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.IsNil)
+ c.Check(s.reqs, check.HasLen, 1)
+}
+
+func (s *clientRetrySuite) TestNetworkError(c *check.C) {
+ // Close the stub server to produce a "connection refused" error.
+ s.server.Close()
+
+ start := time.Now()
+ timeout := time.Second
+ ctx, cancel := context.WithDeadline(context.Background(), start.Add(timeout))
+ defer cancel()
+ s.client.Timeout = timeout * 2
+ err := s.client.RequestAndDecodeContext(ctx, &struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.ErrorMatches, `.*dial tcp .* connection refused.*`)
+ delta := time.Since(start)
+ c.Check(delta > timeout, check.Equals, true, check.Commentf("time.Since(start) == %v, timeout = %v", delta, timeout))
+}
+
+func (s *clientRetrySuite) TestNonRetryableError(c *check.C) {
+ s.respStatus <- http.StatusBadRequest
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.ErrorMatches, `.*400 Bad Request.*`)
+ c.Check(s.reqs, check.HasLen, 1)
+}
+
+// As of 0.7.2, retryablehttp does not recognize this as a
+// non-retryable error.
+func (s *clientRetrySuite) TestNonRetryableStdlibError(c *check.C) {
+ s.respStatus <- http.StatusOK
+ req, err := http.NewRequest(http.MethodGet, "https://"+s.client.APIHost+"/test", nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Good-Header", "T\033rrible header value")
+ err = s.client.DoAndDecode(&struct{}{}, req)
+ c.Check(err, check.ErrorMatches, `.*after 1 attempt.*net/http: invalid header .*`)
+ if !c.Check(s.reqs, check.HasLen, 0) {
+ c.Logf("%v", s.reqs[0])
+ }
+}
+
+func (s *clientRetrySuite) TestNonRetryableAfter503s(c *check.C) {
+ time.AfterFunc(time.Second, func() { s.respStatus <- http.StatusNotFound })
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.ErrorMatches, `.*404 Not Found.*`)
+}
+
+func (s *clientRetrySuite) TestOKAfter503s(c *check.C) {
+ start := time.Now()
+ delay := time.Second
+ time.AfterFunc(delay, func() { s.respStatus <- http.StatusOK })
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.IsNil)
+ c.Check(len(s.reqs) > 1, check.Equals, true, check.Commentf("len(s.reqs) == %d", len(s.reqs)))
+ c.Check(time.Since(start) > delay, check.Equals, true)
+}
+
+func (s *clientRetrySuite) TestTimeoutAfter503(c *check.C) {
+ s.respStatus <- http.StatusServiceUnavailable
+ s.respDelay = time.Second * 2
+ s.client.Timeout = time.Second / 2
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.ErrorMatches, `.*503 Service Unavailable.*`)
+ c.Check(s.reqs, check.HasLen, 2)
+}
+
+func (s *clientRetrySuite) Test503Forever(c *check.C) {
+ err := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.ErrorMatches, `.*503 Service Unavailable.*`)
+ c.Check(len(s.reqs) > 1, check.Equals, true, check.Commentf("len(s.reqs) == %d", len(s.reqs)))
+}
+
+func (s *clientRetrySuite) TestContextAlreadyCanceled(c *check.C) {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ err := s.client.RequestAndDecodeContext(ctx, &struct{}{}, http.MethodGet, "test", nil, nil)
+ c.Check(err, check.Equals, context.Canceled)
+}
+
+func (s *clientRetrySuite) TestExponentialBackoff(c *check.C) {
+ var min, max time.Duration
+ min, max = time.Second, 64*time.Second
+
+ t := exponentialBackoff(min, max, 0, nil)
+ c.Check(t, check.Equals, min)
+
+ for e := float64(1); e < 5; e += 1 {
+ ok := false
+ for i := 0; i < 20; i++ {
+ t = exponentialBackoff(min, max, int(e), nil)
+ // Every returned value must be between min and min(2^e, max)
+ c.Check(t >= min, check.Equals, true)
+ c.Check(t <= min*time.Duration(math.Pow(2, e)), check.Equals, true)
+ c.Check(t <= max, check.Equals, true)
+ // Check that jitter is actually happening by
+ // checking that at least one in 20 trials is
+ // between min*2^(e-.75) and min*2^(e-.25)
+ jittermin := time.Duration(float64(min) * math.Pow(2, e-0.75))
+ jittermax := time.Duration(float64(min) * math.Pow(2, e-0.25))
+ c.Logf("min %v max %v e %v jittermin %v jittermax %v t %v", min, max, e, jittermin, jittermax, t)
+ if t > jittermin && t < jittermax {
+ ok = true
+ break
+ }
+ }
+ c.Check(ok, check.Equals, true)
+ }
+
+ for i := 0; i < 20; i++ {
+ t := exponentialBackoff(min, max, 100, nil)
+ c.Check(t < max, check.Equals, true)
+ }
+
+ for _, trial := range []struct {
+ retryAfter string
+ expect time.Duration
+ }{
+ {"1", time.Second * 4}, // minimum enforced
+ {"5", time.Second * 5}, // header used
+ {"55", time.Second * 10}, // maximum enforced
+ {"eleventy-nine", time.Second * 4}, // invalid header, exponential backoff used
+ {time.Now().UTC().Add(time.Second).Format(time.RFC1123), time.Second * 4}, // minimum enforced
+ {time.Now().UTC().Add(time.Minute).Format(time.RFC1123), time.Second * 10}, // maximum enforced
+ {time.Now().UTC().Add(-time.Minute).Format(time.RFC1123), time.Second * 4}, // minimum enforced
+ } {
+ c.Logf("trial %+v", trial)
+ t := exponentialBackoff(time.Second*4, time.Second*10, 0, &http.Response{
+ StatusCode: http.StatusTooManyRequests,
+ Header: http.Header{"Retry-After": {trial.retryAfter}}})
+ c.Check(t, check.Equals, trial.expect)
+ }
+ t = exponentialBackoff(time.Second*4, time.Second*10, 0, &http.Response{
+ StatusCode: http.StatusTooManyRequests,
+ })
+ c.Check(t, check.Equals, time.Second*4)
+
+ t = exponentialBackoff(0, max, 0, nil)
+ c.Check(t, check.Equals, time.Duration(0))
+ t = exponentialBackoff(0, max, 1, nil)
+ c.Check(t, check.Not(check.Equals), time.Duration(0))
+}
diff --git a/sdk/go/arvados/collection.go b/sdk/go/arvados/collection.go
index 389fe4e484..1e9616c428 100644
--- a/sdk/go/arvados/collection.go
+++ b/sdk/go/arvados/collection.go
@@ -104,28 +104,57 @@ type CollectionList struct {
Limit int `json:"limit"`
}
-var (
- blkRe = regexp.MustCompile(`^ [0-9a-f]{32}\+\d+`)
- tokRe = regexp.MustCompile(` ?[^ ]*`)
-)
-
// PortableDataHash computes the portable data hash of the given
// manifest.
func PortableDataHash(mt string) string {
+ // To calculate the PDH, we write the manifest to an md5 hash
+ // func, except we skip the "extra" part of block tokens that
+ // look like "abcdef0123456789abcdef0123456789+12345+extra".
+ //
+ // This code is simplified by the facts that (A) all block
+ // tokens -- even the first and last in a stream -- are
+ // preceded and followed by a space character; and (B) all
+ // non-block tokens either start with '.' or contain ':'.
+ //
+ // A regexp-based approach (like the one this replaced) would
+ // be more readable, but very slow.
h := md5.New()
size := 0
- _ = tokRe.ReplaceAllFunc([]byte(mt), func(tok []byte) []byte {
- if m := blkRe.Find(tok); m != nil {
- // write hash+size, ignore remaining block hints
- tok = m
+ todo := []byte(mt)
+ for len(todo) > 0 {
+ // sp is the end of the current token (note that if
+ // the current token is the last file token in a
+ // stream, we'll also include the \n and the dirname
+ // token on the next line, which is perfectly fine for
+ // our purposes).
+ sp := bytes.IndexByte(todo, ' ')
+ if sp < 0 {
+ // Last token of the manifest, which is never
+ // a block token.
+ n, _ := h.Write(todo)
+ size += n
+ break
}
- n, err := h.Write(tok)
- if err != nil {
- panic(err)
+ if sp >= 34 && todo[32] == '+' && bytes.IndexByte(todo[:32], ':') == -1 && todo[0] != '.' {
+ // todo[:sp] is a block token.
+ sizeend := bytes.IndexByte(todo[33:sp], '+')
+ if sizeend < 0 {
+ // "hash+size"
+ sizeend = sp
+ } else {
+ // "hash+size+extra"
+ sizeend += 33
+ }
+ n, _ := h.Write(todo[:sizeend])
+ h.Write([]byte{' '})
+ size += n + 1
+ } else {
+ // todo[:sp] is not a block token.
+ n, _ := h.Write(todo[:sp+1])
+ size += n
}
- size += n
- return nil
- })
+ todo = todo[sp+1:]
+ }
return fmt.Sprintf("%x+%d", h.Sum(nil), size)
}
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 6d8f39dfb3..116051b09e 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "crypto/tls"
"encoding/json"
"errors"
"fmt"
@@ -61,13 +62,10 @@ func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
}
type WebDAVCacheConfig struct {
- TTL Duration
- UUIDTTL Duration
- MaxBlockEntries int
- MaxCollectionEntries int
- MaxCollectionBytes int64
- MaxUUIDEntries int
- MaxSessions int
+ TTL Duration
+ DiskCacheSize ByteSizeOrPercent
+ MaxCollectionBytes ByteSize
+ MaxSessions int
}
type UploadDownloadPermission struct {
@@ -101,7 +99,12 @@ type Cluster struct {
DisabledAPIs StringSet
MaxIndexDatabaseRead int
MaxItemsPerResponse int
+ MaxConcurrentRailsRequests int
MaxConcurrentRequests int
+ MaxQueuedRequests int
+ MaxGatewayTunnels int
+ MaxQueueTimeForLockRequests Duration
+ LogCreateRequestFraction float64
MaxKeepBlobBuffers int
MaxRequestAmplification int
MaxRequestSize int
@@ -115,6 +118,7 @@ type Cluster struct {
FreezeProjectRequiresDescription bool
FreezeProjectRequiresProperties StringSet
UnfreezeProjectRequiresAdmin bool
+ LockBeforeUpdate bool
}
AuditLogs struct {
MaxAge Duration
@@ -147,12 +151,15 @@ type Cluster struct {
BalanceCollectionBuffers int
BalanceTimeout Duration
BalanceUpdateLimit int
+ BalancePullLimit int
+ BalanceTrashLimit int
WebDAVCache WebDAVCacheConfig
KeepproxyPermission UploadDownloadRolePermissions
WebDAVPermission UploadDownloadRolePermissions
WebDAVLogEvents bool
+ WebDAVOutputBuffer ByteSize
}
Git struct {
GitCommand string
@@ -165,6 +172,7 @@ type Cluster struct {
URL URL
StartTLS bool
InsecureTLS bool
+ MinTLSVersion TLSVersion
StripDomain string
AppendDomain string
SearchAttribute string
@@ -203,11 +211,12 @@ type Cluster struct {
Enable bool
Users map[string]TestUser
}
- LoginCluster string
- RemoteTokenRefresh Duration
- TokenLifetime Duration
- TrustedClients map[string]struct{}
- IssueTrustedTokens bool
+ LoginCluster string
+ RemoteTokenRefresh Duration
+ TokenLifetime Duration
+ TrustedClients map[URL]struct{}
+ TrustPrivateNetworks bool
+ IssueTrustedTokens bool
}
Mail struct {
MailchimpAPIKey string
@@ -219,9 +228,10 @@ type Cluster struct {
EmailFrom string
}
SystemLogs struct {
- LogLevel string
- Format string
- MaxRequestLogParamsSize int
+ LogLevel string
+ Format string
+ MaxRequestLogParamsSize int
+ RequestQueueDumpDirectory string
}
TLS struct {
Certificate string
@@ -251,35 +261,27 @@ type Cluster struct {
PreferDomainForUsername string
UserSetupMailText string
RoleGroupsVisibleToAll bool
+ CanCreateRoleGroups bool
+ ActivityLoggingPeriod Duration
+ SyncIgnoredGroups []string
+ SyncRequiredGroups []string
+ SyncUserAccounts bool
+ SyncUserAPITokens bool
+ SyncUserGroups bool
+ SyncUserSSHKeys bool
}
StorageClasses map[string]StorageClassConfig
Volumes map[string]Volume
Workbench struct {
- ActivationContactLink string
- APIClientConnectTimeout Duration
- APIClientReceiveTimeout Duration
- APIResponseCompression bool
- ApplicationMimetypesWithViewIcon StringSet
- ArvadosDocsite string
- ArvadosPublicDataDocURL string
- DefaultOpenIdPrefix string
- DisableSharingURLsUI bool
- EnableGettingStartedPopup bool
- EnablePublicProjectsPage bool
- FileViewersConfigURL string
- LogViewerMaxBytes ByteSize
- MultiSiteSearch string
- ProfilingEnabled bool
- Repositories bool
- RepositoryCache string
- RunningJobLogRecordsToFetch int
- SecretKeyBase string
- ShowRecentCollectionsOnDashboard bool
- ShowUserAgreementInline bool
- ShowUserNotifications bool
- SiteName string
- Theme string
- UserProfileFormFields map[string]struct {
+ ActivationContactLink string
+ ArvadosDocsite string
+ ArvadosPublicDataDocURL string
+ DisableSharingURLsUI bool
+ FileViewersConfigURL string
+ ShowUserAgreementInline bool
+ SiteName string
+ Theme string
+ UserProfileFormFields map[string]struct {
Type string
FormFieldTitle string
FormFieldDescription string
@@ -293,6 +295,7 @@ type Cluster struct {
SSHHelpPageHTML string
SSHHelpHostSuffix string
IdleTimeout Duration
+ BannerUUID string
}
}
@@ -302,12 +305,13 @@ type StorageClassConfig struct {
}
type Volume struct {
- AccessViaHosts map[URL]VolumeAccess
- ReadOnly bool
- Replication int
- StorageClasses map[string]bool
- Driver string
- DriverParameters json.RawMessage
+ AccessViaHosts map[URL]VolumeAccess
+ ReadOnly bool
+ AllowTrashWhenReadOnly bool
+ Replication int
+ StorageClasses map[string]bool
+ Driver string
+ DriverParameters json.RawMessage
}
type S3VolumeDriverParameters struct {
@@ -319,7 +323,6 @@ type S3VolumeDriverParameters struct {
Bucket string
LocationConstraint bool
V2Signature bool
- UseAWSS3v2Driver bool
IndexPageSize int
ConnectTimeout Duration
ReadTimeout Duration
@@ -396,13 +399,58 @@ func (su *URL) UnmarshalText(text []byte) error {
}
func (su URL) MarshalText() ([]byte, error) {
- return []byte(fmt.Sprintf("%s", (*url.URL)(&su).String())), nil
+ return []byte(su.String()), nil
}
func (su URL) String() string {
return (*url.URL)(&su).String()
}
+type TLSVersion uint16
+
+func (v TLSVersion) MarshalText() ([]byte, error) {
+ switch v {
+ case 0:
+ return []byte{}, nil
+ case tls.VersionTLS10:
+ return []byte("1.0"), nil
+ case tls.VersionTLS11:
+ return []byte("1.1"), nil
+ case tls.VersionTLS12:
+ return []byte("1.2"), nil
+ case tls.VersionTLS13:
+ return []byte("1.3"), nil
+ default:
+ return nil, fmt.Errorf("unsupported TLSVersion %x", v)
+ }
+}
+
+func (v *TLSVersion) UnmarshalJSON(text []byte) error {
+ if len(text) > 0 && text[0] == '"' {
+ var s string
+ err := json.Unmarshal(text, &s)
+ if err != nil {
+ return err
+ }
+ text = []byte(s)
+ }
+ switch string(text) {
+ case "":
+ *v = 0
+ case "1.0":
+ *v = tls.VersionTLS10
+ case "1.1":
+ *v = tls.VersionTLS11
+ case "1.2":
+ *v = tls.VersionTLS12
+ case "1.3":
+ *v = tls.VersionTLS13
+ default:
+ return fmt.Errorf("unsupported TLSVersion %q", text)
+ }
+ return nil
+}
+
type ServiceInstance struct {
ListenURL URL
Rendezvous string `json:",omitempty"`
@@ -449,7 +497,6 @@ type ContainersConfig struct {
DefaultKeepCacheRAM ByteSize
DispatchPrivateKey string
LogReuseDecisions bool
- MaxComputeVMs int
MaxDispatchAttempts int
MaxRetryAttempts int
MinRetryPeriod Duration
@@ -458,6 +505,7 @@ type ContainersConfig struct {
SupportedDockerImageFormats StringSet
AlwaysUsePreemptibleInstances bool
PreemptiblePriceFactor float64
+ MaximumPriceFactor float64
RuntimeEngine string
LocalKeepBlobBuffersPerVCPU int
LocalKeepLogsToContainerLog string
@@ -468,6 +516,7 @@ type ContainersConfig struct {
}
Logging struct {
MaxAge Duration
+ SweepInterval Duration
LogBytesPerEvent int
LogSecondsBetweenEvents Duration
LogThrottlePeriod Duration
@@ -497,9 +546,11 @@ type ContainersConfig struct {
}
}
LSF struct {
- BsubSudoUser string
- BsubArgumentsList []string
- BsubCUDAArguments []string
+ BsubSudoUser string
+ BsubArgumentsList []string
+ BsubCUDAArguments []string
+ MaxRunTimeOverhead Duration
+ MaxRunTimeDefault Duration
}
}
@@ -507,11 +558,16 @@ type CloudVMsConfig struct {
Enable bool
BootProbeCommand string
+ InstanceInitCommand string
DeployRunnerBinary string
+ DeployPublicKey bool
ImageID string
MaxCloudOpsPerSecond int
MaxProbesPerSecond int
MaxConcurrentInstanceCreateOps int
+ MaxInstances int
+ InitialQuotaEstimate int
+ SupervisorFraction float64
PollInterval Duration
ProbeInterval Duration
SSHPort string
@@ -535,9 +591,11 @@ type InstanceTypeMap map[string]InstanceType
var errDuplicateInstanceTypeName = errors.New("duplicate instance type name")
// UnmarshalJSON does special handling of InstanceTypes:
-// * populate computed fields (Name and Scratch)
-// * error out if InstancesTypes are populated as an array, which was
-// deprecated in Arvados 1.2.0
+//
+// - populate computed fields (Name and Scratch)
+//
+// - error out if InstancesTypes are populated as an array, which was
+// deprecated in Arvados 1.2.0
func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
fixup := func(t InstanceType) (InstanceType, error) {
if t.ProviderType == "" {
diff --git a/sdk/go/arvados/config_test.go b/sdk/go/arvados/config_test.go
index 58f4b961bb..3c65643bea 100644
--- a/sdk/go/arvados/config_test.go
+++ b/sdk/go/arvados/config_test.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "crypto/tls"
"encoding/json"
"github.com/ghodss/yaml"
@@ -71,3 +72,19 @@ func (s *ConfigSuite) TestURLTrailingSlash(c *check.C) {
json.Unmarshal([]byte(`{"https://foo.example/": true}`), &b)
c.Check(a, check.DeepEquals, b)
}
+
+func (s *ConfigSuite) TestTLSVersion(c *check.C) {
+ var v struct {
+ Version TLSVersion
+ }
+ err := json.Unmarshal([]byte(`{"Version": 1.0}`), &v)
+ c.Check(err, check.IsNil)
+ c.Check(v.Version, check.Equals, TLSVersion(tls.VersionTLS10))
+
+ err = json.Unmarshal([]byte(`{"Version": "1.3"}`), &v)
+ c.Check(err, check.IsNil)
+ c.Check(v.Version, check.Equals, TLSVersion(tls.VersionTLS13))
+
+ err = json.Unmarshal([]byte(`{"Version": "1.345"}`), &v)
+ c.Check(err, check.NotNil)
+}
diff --git a/sdk/go/arvados/container.go b/sdk/go/arvados/container.go
index 466221fe19..91c8fbfe29 100644
--- a/sdk/go/arvados/container.go
+++ b/sdk/go/arvados/container.go
@@ -19,6 +19,7 @@ type Container struct {
Cwd string `json:"cwd"`
Environment map[string]string `json:"environment"`
LockedByUUID string `json:"locked_by_uuid"`
+ LockCount int `json:"lock_count"`
Mounts map[string]Mount `json:"mounts"`
Output string `json:"output"`
OutputPath string `json:"output_path"`
@@ -38,6 +39,8 @@ type Container struct {
RuntimeToken string `json:"runtime_token"`
AuthUUID string `json:"auth_uuid"`
Log string `json:"log"`
+ Cost float64 `json:"cost"`
+ SubrequestsCost float64 `json:"subrequests_cost"`
}
// ContainerRequest is an arvados#container_request resource.
@@ -77,6 +80,7 @@ type ContainerRequest struct {
ContainerCount int `json:"container_count"`
OutputStorageClasses []string `json:"output_storage_classes"`
OutputProperties map[string]interface{} `json:"output_properties"`
+ CumulativeCost float64 `json:"cumulative_cost"`
}
// Mount is special behavior to attach to a filesystem path or device.
@@ -104,11 +108,12 @@ type CUDARuntimeConstraints struct {
// RuntimeConstraints specify a container's compute resources (RAM,
// CPU) and network connectivity.
type RuntimeConstraints struct {
- API bool `json:"API"`
- RAM int64 `json:"ram"`
- VCPUs int `json:"vcpus"`
- KeepCacheRAM int64 `json:"keep_cache_ram"`
- CUDA CUDARuntimeConstraints `json:"cuda"`
+ API bool `json:"API"`
+ RAM int64 `json:"ram"`
+ VCPUs int `json:"vcpus"`
+ KeepCacheRAM int64 `json:"keep_cache_ram"`
+ KeepCacheDisk int64 `json:"keep_cache_disk"`
+ CUDA CUDARuntimeConstraints `json:"cuda"`
}
// SchedulingParameters specify a container's scheduling parameters
@@ -117,6 +122,7 @@ type SchedulingParameters struct {
Partitions []string `json:"partitions"`
Preemptible bool `json:"preemptible"`
MaxRunTime int `json:"max_run_time"`
+ Supervisor bool `json:"supervisor"`
}
// ContainerList is an arvados#containerList resource.
@@ -154,3 +160,9 @@ const (
ContainerRequestStateCommitted = ContainerRequestState("Committed")
ContainerRequestStateFinal = ContainerRequestState("Final")
)
+
+type ContainerStatus struct {
+ UUID string `json:"uuid"`
+ State ContainerState `json:"container_state"`
+ SchedulingStatus string `json:"scheduling_status"`
+}
diff --git a/sdk/go/arvados/duration.go b/sdk/go/arvados/duration.go
index c922f0a30d..9df210ccb0 100644
--- a/sdk/go/arvados/duration.go
+++ b/sdk/go/arvados/duration.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "bytes"
"encoding/json"
"fmt"
"strings"
@@ -17,6 +18,13 @@ type Duration time.Duration
// UnmarshalJSON implements json.Unmarshaler.
func (d *Duration) UnmarshalJSON(data []byte) error {
+ if bytes.Equal(data, []byte(`"0"`)) || bytes.Equal(data, []byte(`0`)) {
+ // Unitless 0 is not accepted by ParseDuration, but we
+ // accept it as a reasonable spelling of 0
+ // nanoseconds.
+ *d = 0
+ return nil
+ }
if data[0] == '"' {
return d.Set(string(data[1 : len(data)-1]))
}
diff --git a/sdk/go/arvados/duration_test.go b/sdk/go/arvados/duration_test.go
index 6a198e6940..40344d061b 100644
--- a/sdk/go/arvados/duration_test.go
+++ b/sdk/go/arvados/duration_test.go
@@ -60,4 +60,14 @@ func (s *DurationSuite) TestUnmarshalJSON(c *check.C) {
err = json.Unmarshal([]byte(`{"D":"60s"}`), &d)
c.Check(err, check.IsNil)
c.Check(d.D.Duration(), check.Equals, time.Minute)
+
+ d.D = Duration(time.Second)
+ err = json.Unmarshal([]byte(`{"D":"0"}`), &d)
+ c.Check(err, check.IsNil)
+ c.Check(d.D.Duration(), check.Equals, time.Duration(0))
+
+ d.D = Duration(time.Second)
+ err = json.Unmarshal([]byte(`{"D":0}`), &d)
+ c.Check(err, check.IsNil)
+ c.Check(d.D.Duration(), check.Equals, time.Duration(0))
}
diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go
index 2ad4d1f859..430a0d4c9b 100644
--- a/sdk/go/arvados/fs_base.go
+++ b/sdk/go/arvados/fs_base.go
@@ -13,6 +13,7 @@ import (
"net/http"
"os"
"path"
+ "path/filepath"
"strings"
"sync"
"time"
@@ -387,17 +388,28 @@ func (n *treenode) Size() int64 {
}
func (n *treenode) FileInfo() os.FileInfo {
- n.Lock()
- defer n.Unlock()
- n.fileinfo.size = int64(len(n.inodes))
- return n.fileinfo
+ n.RLock()
+ defer n.RUnlock()
+ fi := n.fileinfo
+ fi.size = int64(len(n.inodes))
+ return fi
}
func (n *treenode) Readdir() (fi []os.FileInfo, err error) {
+ // We need RLock to safely read n.inodes, but we must release
+ // it before calling FileInfo() on the child nodes. Otherwise,
+ // we risk deadlock when filter groups A and B match each
+ // other, concurrent Readdir() calls try to RLock them in
+ // opposite orders, and one cannot be RLocked a second time
+ // because a third caller is waiting for a write lock.
n.RLock()
- defer n.RUnlock()
- fi = make([]os.FileInfo, 0, len(n.inodes))
+ inodes := make([]inode, 0, len(n.inodes))
for _, inode := range n.inodes {
+ inodes = append(inodes, inode)
+ }
+ n.RUnlock()
+ fi = make([]os.FileInfo, 0, len(inodes))
+ for _, inode := range inodes {
fi = append(fi, inode.FileInfo())
}
return
@@ -420,10 +432,20 @@ func (n *treenode) Sync() error {
}
func (n *treenode) MemorySize() (size int64) {
+ // To avoid making other callers wait while we count the
+ // entire filesystem size, we lock the node only long enough
+ // to copy the list of children. We accept that the resulting
+ // size will sometimes be misleading (e.g., we will
+ // double-count an item that moves from A to B after we check
+ // A's size but before we check B's size).
n.RLock()
- defer n.RUnlock()
debugPanicIfNotLocked(n, false)
+ todo := make([]inode, 0, len(n.inodes))
for _, inode := range n.inodes {
+ todo = append(todo, inode)
+ }
+ n.RUnlock()
+ for _, inode := range todo {
size += inode.MemorySize()
}
return 64 + size
@@ -458,7 +480,8 @@ func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*fileha
return nil, ErrSyncNotSupported
}
dirname, name := path.Split(name)
- parent, err := rlookup(fs.root, dirname)
+ ancestors := map[inode]bool{}
+ parent, err := rlookup(fs.root, dirname, ancestors)
if err != nil {
return nil, err
}
@@ -523,6 +546,24 @@ func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*fileha
return nil, err
}
}
+ // If n and one of its parents/ancestors are [hardlinks to]
+ // the same node (e.g., a filter group that matches itself),
+ // open an "empty directory" node instead, so the inner
+ // hardlink appears empty. This is needed to ensure
+ // Open("a/b/c/x/x").Readdir() appears empty, matching the
+ // behavior of rlookup("a/b/c/x/x/z") => ErrNotExist.
+ if hl, ok := n.(*hardlink); (ok && ancestors[hl.inode]) || ancestors[n] {
+ n = &treenode{
+ fs: n.FS(),
+ parent: parent,
+ inodes: nil,
+ fileinfo: fileinfo{
+ name: name,
+ modTime: time.Now(),
+ mode: 0555 | os.ModeDir,
+ },
+ }
+ }
return &filehandle{
inode: n,
append: flag&os.O_APPEND != 0,
@@ -541,7 +582,7 @@ func (fs *fileSystem) Create(name string) (File, error) {
func (fs *fileSystem) Mkdir(name string, perm os.FileMode) error {
dirname, name := path.Split(name)
- n, err := rlookup(fs.root, dirname)
+ n, err := rlookup(fs.root, dirname, nil)
if err != nil {
return err
}
@@ -565,7 +606,7 @@ func (fs *fileSystem) Mkdir(name string, perm os.FileMode) error {
}
func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
- node, err := rlookup(fs.root, name)
+ node, err := rlookup(fs.root, name, nil)
if err != nil {
return nil, err
}
@@ -631,7 +672,15 @@ func (fs *fileSystem) Rename(oldname, newname string) error {
}
locked := map[sync.Locker]bool{}
for i := len(needLock) - 1; i >= 0; i-- {
- if n := needLock[i]; !locked[n] {
+ n := needLock[i]
+ if fs, ok := n.(interface{ rootnode() inode }); ok {
+ // Lock the fs's root dir directly, not
+ // through the fs. Otherwise our "locked" map
+ // would not reliably prevent double-locking
+ // the fs's root dir.
+ n = fs.rootnode()
+ }
+ if !locked[n] {
n.Lock()
defer n.Unlock()
locked[n] = true
@@ -686,7 +735,7 @@ func (fs *fileSystem) remove(name string, recursive bool) error {
if name == "" || name == "." || name == ".." {
return ErrInvalidArgument
}
- dir, err := rlookup(fs.root, dirname)
+ dir, err := rlookup(fs.root, dirname, nil)
if err != nil {
return err
}
@@ -723,9 +772,31 @@ func (fs *fileSystem) MemorySize() int64 {
// rlookup (recursive lookup) returns the inode for the file/directory
// with the given name (which may contain "/" separators). If no such
// file/directory exists, the returned node is nil.
-func rlookup(start inode, path string) (node inode, err error) {
+//
+// The visited map should be either nil or empty. If non-nil, all
+// nodes and hardlink targets visited by the given path will be added
+// to it.
+//
+// If a cycle is detected, the second occurrence of the offending node
+// will be replaced by an empty directory. For example, if "x" is a
+// filter group that matches itself, then rlookup("a/b/c/x") will
+// return the filter group, and rlookup("a/b/c/x/x") will return an
+// empty directory.
+func rlookup(start inode, path string, visited map[inode]bool) (node inode, err error) {
+ if visited == nil {
+ visited = map[inode]bool{}
+ }
node = start
+ // Clean up ./ and ../ and double-slashes, but (unlike
+ // filepath.Clean) retain a trailing slash, because looking up
+ // ".../regularfile/" should fail.
+ trailingSlash := strings.HasSuffix(path, "/")
+ path = filepath.Clean(path)
+ if trailingSlash && path != "/" {
+ path += "/"
+ }
for _, name := range strings.Split(path, "/") {
+ visited[node] = true
if node.IsDir() {
if name == "." || name == "" {
continue
@@ -743,6 +814,24 @@ func rlookup(start inode, path string) (node inode, err error) {
if node == nil || err != nil {
break
}
+ checknode := node
+ if hardlinked, ok := checknode.(*hardlink); ok {
+ checknode = hardlinked.inode
+ }
+ if visited[checknode] {
+ node = &treenode{
+ fs: node.FS(),
+ parent: node.Parent(),
+ inodes: nil,
+ fileinfo: fileinfo{
+ name: name,
+ modTime: time.Now(),
+ mode: 0555 | os.ModeDir,
+ },
+ }
+ } else {
+ visited[checknode] = true
+ }
}
if node == nil && err == nil {
err = os.ErrNotExist
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 26012e2406..101fade74b 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -44,9 +44,17 @@ type CollectionFileSystem interface {
type collectionFileSystem struct {
fileSystem
uuid string
- savedPDH atomic.Value
replicas int
storageClasses []string
+
+ // PDH returned by the server as of last sync/load.
+ loadedPDH atomic.Value
+ // PDH of the locally generated manifest as of last
+ // sync/load. This can differ from loadedPDH after loading a
+ // version that was generated with different code and sorts
+ // filenames differently than we do, for example.
+ savedPDH atomic.Value
+
// guessSignatureTTL tracks a lower bound for the server's
// configured BlobSigningTTL. The guess is initially zero, and
// increases when we come across a signature with an expiry
@@ -74,7 +82,7 @@ func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFile
thr: newThrottle(concurrentWriters),
},
}
- fs.savedPDH.Store(c.PortableDataHash)
+ fs.loadedPDH.Store(c.PortableDataHash)
if r := c.ReplicationDesired; r != nil {
fs.replicas = *r
}
@@ -94,6 +102,13 @@ func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFile
if err := root.loadManifest(c.ManifestText); err != nil {
return nil, err
}
+
+ txt, err := root.marshalManifest(context.Background(), ".", false)
+ if err != nil {
+ return nil, err
+ }
+ fs.savedPDH.Store(PortableDataHash(txt))
+
backdateTree(root, modTime)
fs.root = root
return fs, nil
@@ -290,44 +305,72 @@ func (fs *collectionFileSystem) Truncate(int64) error {
return ErrInvalidOperation
}
-// Check for and incorporate upstream changes -- unless that has
-// already been done recently, in which case this func is a no-op.
-func (fs *collectionFileSystem) checkChangesOnServer() error {
- if fs.uuid == "" && fs.savedPDH.Load() == "" {
- return nil
+// Check for and incorporate upstream changes. If force==false, this
+// is a no-op except once every ttl/100 or so.
+//
+// Return value is true if new content was loaded from upstream and
+// any unsaved local changes have been discarded.
+func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
+ if fs.uuid == "" && fs.loadedPDH.Load() == "" {
+ return false, nil
}
- // First try UUID if any, then last known PDH. Stop if all
- // signatures are new enough.
- checkingAll := false
- for _, id := range []string{fs.uuid, fs.savedPDH.Load().(string)} {
- if id == "" {
- continue
- }
-
- fs.lockCheckChanges.Lock()
- if !checkingAll && fs.holdCheckChanges.After(time.Now()) {
- fs.lockCheckChanges.Unlock()
- return nil
- }
- remain, ttl := fs.signatureTimeLeft()
- if remain > 0.01 && !checkingAll {
- fs.holdCheckChanges = time.Now().Add(ttl / 100)
- }
+ fs.lockCheckChanges.Lock()
+ if !force && fs.holdCheckChanges.After(time.Now()) {
fs.lockCheckChanges.Unlock()
+ return false, nil
+ }
+ remain, ttl := fs.signatureTimeLeft()
+ if remain > 0.01 {
+ fs.holdCheckChanges = time.Now().Add(ttl / 100)
+ }
+ fs.lockCheckChanges.Unlock()
- if remain >= 0.5 {
- break
+ if !force && remain >= 0.5 {
+ // plenty of time left on current signatures
+ return false, nil
+ }
+
+ loadedPDH, _ := fs.loadedPDH.Load().(string)
+ getparams := map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}}
+ if fs.uuid != "" {
+ var coll Collection
+ err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+fs.uuid, nil, getparams)
+ if err != nil {
+ return false, err
+ }
+ if coll.PortableDataHash != loadedPDH {
+ // collection has changed upstream since we
+ // last loaded or saved. Refresh local data,
+ // losing any unsaved local changes.
+ newfs, err := coll.FileSystem(fs.fileSystem.fsBackend, fs.fileSystem.fsBackend)
+ if err != nil {
+ return false, err
+ }
+ snap, err := Snapshot(newfs, "/")
+ if err != nil {
+ return false, err
+ }
+ err = Splice(fs, "/", snap)
+ if err != nil {
+ return false, err
+ }
+ fs.loadedPDH.Store(coll.PortableDataHash)
+ fs.savedPDH.Store(newfs.(*collectionFileSystem).savedPDH.Load())
+ return true, nil
}
- checkingAll = true
+ fs.updateSignatures(coll.ManifestText)
+ return false, nil
+ }
+ if loadedPDH != "" {
var coll Collection
- err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}})
+ err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+loadedPDH, nil, getparams)
if err != nil {
- continue
+ return false, err
}
fs.updateSignatures(coll.ManifestText)
}
- return nil
+ return false, nil
}
// Refresh signature on a single locator, if necessary. Assume caller
@@ -339,11 +382,12 @@ func (fs *collectionFileSystem) refreshSignature(locator string) string {
if err != nil || exp.Sub(time.Now()) > time.Minute {
// Synchronous update is not needed. Start an
// asynchronous update if needed.
- go fs.checkChangesOnServer()
+ go fs.checkChangesOnServer(false)
return locator
}
+ loadedPDH, _ := fs.loadedPDH.Load().(string)
var manifests string
- for _, id := range []string{fs.uuid, fs.savedPDH.Load().(string)} {
+ for _, id := range []string{fs.uuid, loadedPDH} {
if id == "" {
continue
}
@@ -368,18 +412,19 @@ func (fs *collectionFileSystem) refreshSignature(locator string) string {
}
func (fs *collectionFileSystem) Sync() error {
- err := fs.checkChangesOnServer()
+ refreshed, err := fs.checkChangesOnServer(true)
if err != nil {
return err
}
- if fs.uuid == "" {
+ if refreshed || fs.uuid == "" {
return nil
}
txt, err := fs.MarshalManifest(".")
if err != nil {
return fmt.Errorf("sync failed: %s", err)
}
- if PortableDataHash(txt) == fs.savedPDH.Load() {
+ savingPDH := PortableDataHash(txt)
+ if savingPDH == fs.savedPDH.Load() {
// No local changes since last save or initial load.
return nil
}
@@ -403,15 +448,16 @@ func (fs *collectionFileSystem) Sync() error {
"select": selectFields,
})
if err != nil {
- return fmt.Errorf("sync failed: update %s: %s", fs.uuid, err)
+ return fmt.Errorf("sync failed: update %s: %w", fs.uuid, err)
}
fs.updateSignatures(coll.ManifestText)
- fs.savedPDH.Store(coll.PortableDataHash)
+ fs.loadedPDH.Store(coll.PortableDataHash)
+ fs.savedPDH.Store(savingPDH)
return nil
}
func (fs *collectionFileSystem) Flush(path string, shortBlocks bool) error {
- node, err := rlookup(fs.fileSystem.root, path)
+ node, err := rlookup(fs.fileSystem.root, path, nil)
if err != nil {
return err
}
@@ -443,15 +489,13 @@ func (fs *collectionFileSystem) Flush(path string, shortBlocks bool) error {
}
func (fs *collectionFileSystem) MemorySize() int64 {
- fs.fileSystem.root.Lock()
- defer fs.fileSystem.root.Unlock()
return fs.fileSystem.root.(*dirnode).MemorySize()
}
func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
fs.fileSystem.root.Lock()
defer fs.fileSystem.root.Unlock()
- return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix)
+ return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, true)
}
func (fs *collectionFileSystem) Size() int64 {
@@ -489,9 +533,9 @@ type filenodePtr struct {
//
// After seeking:
//
-// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
-// ||
-// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
+// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
+// ||
+// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
ptr = startPtr
if ptr.off < 0 {
@@ -576,6 +620,16 @@ func (fn *filenode) FS() FileSystem {
return fn.fs
}
+func (fn *filenode) MemorySize() (size int64) {
+ fn.RLock()
+ defer fn.RUnlock()
+ size = 64
+ for _, seg := range fn.segments {
+ size += seg.memorySize()
+ }
+ return
+}
+
// Read reads file data from a single segment, starting at startPtr,
// into p. startPtr is assumed not to be up-to-date. Caller must have
// RLock or Lock.
@@ -1150,27 +1204,18 @@ func (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) er
return cg.Wait()
}
-// caller must have write lock.
func (dn *dirnode) MemorySize() (size int64) {
- for _, name := range dn.sortedNames() {
- node := dn.inodes[name]
- node.Lock()
- defer node.Unlock()
- switch node := node.(type) {
- case *dirnode:
- size += node.MemorySize()
- case *filenode:
- size += 64
- for _, seg := range node.segments {
- switch seg := seg.(type) {
- case *memSegment:
- size += int64(seg.Len())
- }
- size += 64
- }
- }
+ dn.RLock()
+ todo := make([]inode, 0, len(dn.inodes))
+ for _, node := range dn.inodes {
+ todo = append(todo, node)
}
- return 64 + size
+ dn.RUnlock()
+ size = 64
+ for _, node := range todo {
+ size += node.MemorySize()
+ }
+ return
}
// caller must have write lock.
@@ -1184,7 +1229,7 @@ func (dn *dirnode) sortedNames() []string {
}
// caller must have write lock.
-func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string, error) {
+func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, flush bool) (string, error) {
cg := newContextGroup(ctx)
defer cg.Cancel()
@@ -1231,7 +1276,7 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
for i, name := range dirnames {
i, name := i, name
cg.Go(func() error {
- txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name)
+ txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name, flush)
subdirs[i] = txt
return err
})
@@ -1247,7 +1292,10 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
var fileparts []filepart
var blocks []string
- if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
+ if !flush {
+ // skip flush -- will fail below if anything
+ // needed flushing
+ } else if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
return err
}
for _, name := range filenames {
@@ -1278,10 +1326,12 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
}
streamLen += int64(seg.size)
default:
- // This can't happen: we
- // haven't unlocked since
+ // We haven't unlocked since
// calling flush(sync=true).
- panic(fmt.Sprintf("can't marshal segment type %T", seg))
+ // Evidently the caller passed
+ // flush==false but there were
+ // local changes.
+ return fmt.Errorf("can't marshal segment type %T", seg)
}
}
}
@@ -1308,6 +1358,10 @@ func (dn *dirnode) loadManifest(txt string) error {
}
streams = streams[:len(streams)-1]
segments := []storedSegment{}
+ // streamoffset[n] is the position in the stream of the nth
+ // block, i.e., ∑ segments[j].size ∀ 0≤j<n
- if pos > offset {
- // Can't continue where we left off.
- // TODO: binary search instead of
- // rewinding all the way (but this
- // situation might be rare anyway)
- segIdx, pos = 0, 0
+ if segIdx < len(segments) && streamoffset[segIdx] <= offset && streamoffset[segIdx+1] > offset {
+ // common case with an easy
+ // optimization: start where the
+ // previous segment ended
+ } else if guess := int(offset >> 26); guess >= 0 && guess < len(segments) && streamoffset[guess] <= offset && streamoffset[guess+1] > offset {
+ // another common case with an easy
+ // optimization: all blocks are 64 MiB
+ // (or close enough)
+ segIdx = guess
+ } else {
+ // general case
+ segIdx = sort.Search(len(segments), func(i int) bool {
+ return streamoffset[i+1] > offset
+ })
}
for ; segIdx < len(segments); segIdx++ {
- seg := segments[segIdx]
- next := pos + int64(seg.Len())
- if next <= offset || seg.Len() == 0 {
- pos = next
- continue
- }
- if pos >= offset+length {
+ blkStart := streamoffset[segIdx]
+ if blkStart >= offset+length {
break
}
+ seg := &segments[segIdx]
+ if seg.size == 0 {
+ continue
+ }
var blkOff int
- if pos < offset {
- blkOff = int(offset - pos)
+ if blkStart < offset {
+ blkOff = int(offset - blkStart)
}
- blkLen := seg.Len() - blkOff
- if pos+int64(blkOff+blkLen) > offset+length {
- blkLen = int(offset + length - pos - int64(blkOff))
+ blkLen := seg.size - blkOff
+ if blkStart+int64(seg.size) > offset+length {
+ blkLen = int(offset + length - blkStart - int64(blkOff))
}
fnode.appendSegment(storedSegment{
kc: dn.fs,
@@ -1432,14 +1503,9 @@ func (dn *dirnode) loadManifest(txt string) error {
offset: blkOff,
length: blkLen,
})
- if next > offset+length {
- break
- } else {
- pos = next
- }
}
- if segIdx == len(segments) && pos < offset+length {
- return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
+ if segIdx == len(segments) && streamoffset[segIdx] < offset+length {
+ return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, streamoffset[segIdx], token)
}
}
if !anyFileTokens {
@@ -1627,6 +1693,7 @@ type segment interface {
// Return a new segment with a subsection of the data from this
// one. length<0 means length=Len()-off.
Slice(off int, length int) segment
+ memorySize() int64
}
type memSegment struct {
@@ -1705,6 +1772,10 @@ func (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) {
return
}
+func (me *memSegment) memorySize() int64 {
+ return 64 + int64(len(me.buf))
+}
+
type storedSegment struct {
kc fsBackend
locator string
@@ -1742,6 +1813,10 @@ func (se storedSegment) ReadAt(p []byte, off int64) (n int, err error) {
return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
}
+func (se storedSegment) memorySize() int64 {
+ return 64 + int64(len(se.locator))
+}
+
func canonicalName(name string) string {
name = path.Clean("/" + name)
if name == "/" || name == "./" {
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index c2cac3c6ce..b57f9aa30f 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -124,6 +124,38 @@ func (s *CollectionFSSuite) SetUpTest(c *check.C) {
c.Assert(err, check.IsNil)
}
+func (s *CollectionFSSuite) TestSyncNonCanonicalManifest(c *check.C) {
+ var coll Collection
+ err := s.client.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
+ c.Assert(err, check.IsNil)
+ mtxt := strings.Replace(coll.ManifestText, "3:3:bar 0:3:foo", "0:3:foo 3:3:bar", -1)
+ c.Assert(mtxt, check.Not(check.Equals), coll.ManifestText)
+ err = s.client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]interface{}{
+ "manifest_text": mtxt}})
+ c.Assert(err, check.IsNil)
+ // In order for the rest of the test to work as intended, the API server
+ // needs to retain the file ordering we set manually. We check that here.
+ // We can't check `mtxt == coll.ManifestText` because the API server
+ // might've returned new block signatures if the GET and POST happened in
+ // different seconds.
+ expectPattern := `\./dir1 \S+ 0:3:foo 3:3:bar\n`
+ c.Assert(coll.ManifestText, check.Matches, expectPattern)
+
+ fs, err := coll.FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+ err = fs.Sync()
+ c.Check(err, check.IsNil)
+
+ // fs had no local changes, so Sync should not have saved
+ // anything back to the API/database. (If it did, we would see
+ // the manifest rewritten in canonical order.)
+ var saved Collection
+ err = s.client.RequestAndDecode(&saved, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
+ c.Assert(err, check.IsNil)
+ c.Check(saved.ManifestText, check.Matches, expectPattern)
+}
+
func (s *CollectionFSSuite) TestHttpFileSystemInterface(c *check.C) {
_, ok := s.fs.(http.FileSystem)
c.Check(ok, check.Equals, true)
@@ -1209,11 +1241,12 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
}
nDirs := int64(8)
+ nFiles := int64(67)
megabyte := make([]byte, 1<<20)
for i := int64(0); i < nDirs; i++ {
dir := fmt.Sprintf("dir%d", i)
fs.Mkdir(dir, 0755)
- for j := 0; j < 67; j++ {
+ for j := int64(0); j < nFiles; j++ {
f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
c.Assert(err, check.IsNil)
defer f.Close()
@@ -1221,8 +1254,8 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
c.Assert(err, check.IsNil)
}
}
- inodebytes := int64((nDirs*(67*2+1) + 1) * 64)
- c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20)+inodebytes)
+ inodebytes := int64((nDirs*(nFiles+1) + 1) * 64)
+ c.Check(fs.MemorySize(), check.Equals, nDirs*nFiles*(1<<20+64)+inodebytes)
c.Check(flushed, check.Equals, int64(0))
waitForFlush := func(expectUnflushed, expectFlushed int64) {
@@ -1233,27 +1266,29 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
}
// Nothing flushed yet
- waitForFlush((nDirs*67)<<20+inodebytes, 0)
+ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flushing a non-empty dir "/" is non-recursive and there are
// no top-level files, so this has no effect
fs.Flush("/", false)
- waitForFlush((nDirs*67)<<20+inodebytes, 0)
+ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flush the full block in dir0
fs.Flush("dir0", false)
- waitForFlush((nDirs*67-64)<<20+inodebytes, 64<<20)
+ bigloclen := int64(32 + 9 + 51 + 64) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+ waitForFlush((nDirs*nFiles-64)*(1<<20+64)+inodebytes+bigloclen*64, 64<<20)
err = fs.Flush("dir-does-not-exist", false)
c.Check(err, check.NotNil)
// Flush full blocks in all dirs
fs.Flush("", false)
- waitForFlush(nDirs*3<<20+inodebytes, nDirs*64<<20)
+ waitForFlush(nDirs*3*(1<<20+64)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
// Flush non-full blocks, too
fs.Flush("", true)
- waitForFlush(inodebytes, nDirs*67<<20)
+ smallloclen := int64(32 + 8 + 51 + 64) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+ waitForFlush(inodebytes+bigloclen*64*nDirs+smallloclen*3*nDirs, nDirs*67<<20)
}
// Even when writing lots of files/dirs from different goroutines, as
@@ -1604,48 +1639,101 @@ type CollectionFSUnitSuite struct{}
var _ = check.Suite(&CollectionFSUnitSuite{})
// expect ~2 seconds to load a manifest with 256K files
-func (s *CollectionFSUnitSuite) TestLargeManifest(c *check.C) {
+func (s *CollectionFSUnitSuite) TestLargeManifest_ManyFiles(c *check.C) {
if testing.Short() {
c.Skip("slow")
}
+ s.testLargeManifest(c, 512, 512, 1, 0)
+}
- const (
- dirCount = 512
- fileCount = 512
- )
+func (s *CollectionFSUnitSuite) TestLargeManifest_LargeFiles(c *check.C) {
+ if testing.Short() {
+ c.Skip("slow")
+ }
+ s.testLargeManifest(c, 1, 800, 1000, 0)
+}
+
+func (s *CollectionFSUnitSuite) TestLargeManifest_InterleavedFiles(c *check.C) {
+ if testing.Short() {
+ c.Skip("slow")
+ }
+ // Timing figures here are from a dev host, (0)->(1)->(2)->(3)
+ // (0) no optimizations (main branch commit ea697fb1e8)
+ // (1) resolve streampos->blkidx with binary search
+ // (2) ...and rewrite PortableDataHash() without regexp
+ // (3) ...and use fnodeCache in loadManifest
+ s.testLargeManifest(c, 1, 800, 100, 4<<20) // 127s -> 12s -> 2.5s -> 1.5s
+ s.testLargeManifest(c, 1, 50, 1000, 4<<20) // 44s -> 10s -> 1.5s -> 0.8s
+ s.testLargeManifest(c, 1, 200, 100, 4<<20) // 13s -> 4s -> 0.6s -> 0.3s
+ s.testLargeManifest(c, 1, 200, 150, 4<<20) // 26s -> 4s -> 1s -> 0.5s
+ s.testLargeManifest(c, 1, 200, 200, 4<<20) // 38s -> 6s -> 1.3s -> 0.7s
+ s.testLargeManifest(c, 1, 200, 225, 4<<20) // 46s -> 7s -> 1.5s -> 1s
+ s.testLargeManifest(c, 1, 400, 400, 4<<20) // 477s -> 24s -> 5s -> 3s
+ // s.testLargeManifest(c, 1, 800, 1000, 4<<20) // timeout -> 186s -> 28s -> 17s
+}
+func (s *CollectionFSUnitSuite) testLargeManifest(c *check.C, dirCount, filesPerDir, blocksPerFile, interleaveChunk int) {
+ t0 := time.Now()
+ const blksize = 1 << 26
+ c.Logf("%s building manifest with dirCount=%d filesPerDir=%d blocksPerFile=%d", time.Now(), dirCount, filesPerDir, blocksPerFile)
mb := bytes.NewBuffer(make([]byte, 0, 40000000))
+ blkid := 0
for i := 0; i < dirCount; i++ {
fmt.Fprintf(mb, "./dir%d", i)
- for j := 0; j <= fileCount; j++ {
- fmt.Fprintf(mb, " %032x+42+A%040x@%08x", j, j, j)
+ for j := 0; j < filesPerDir; j++ {
+ for k := 0; k < blocksPerFile; k++ {
+ blkid++
+ fmt.Fprintf(mb, " %032x+%d+A%040x@%08x", blkid, blksize, blkid, blkid)
+ }
}
- for j := 0; j < fileCount; j++ {
- fmt.Fprintf(mb, " %d:%d:dir%d/file%d", j*42+21, 42, j, j)
+ for j := 0; j < filesPerDir; j++ {
+ if interleaveChunk == 0 {
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", (filesPerDir-j-1)*blocksPerFile*blksize, blocksPerFile*blksize, j, j)
+ continue
+ }
+ for todo := int64(blocksPerFile) * int64(blksize); todo > 0; todo -= int64(interleaveChunk) {
+ size := int64(interleaveChunk)
+ if size > todo {
+ size = todo
+ }
+ offset := rand.Int63n(int64(blocksPerFile)*int64(blksize)*int64(filesPerDir) - size)
+ fmt.Fprintf(mb, " %d:%d:dir%d/file%d", offset, size, j, j)
+ }
}
mb.Write([]byte{'\n'})
}
coll := Collection{ManifestText: mb.String()}
- c.Logf("%s built", time.Now())
+ c.Logf("%s built manifest size=%d", time.Now(), mb.Len())
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
c.Logf("%s Alloc=%d Sys=%d", time.Now(), memstats.Alloc, memstats.Sys)
- f, err := coll.FileSystem(nil, nil)
+ f, err := coll.FileSystem(NewClientFromEnv(), &keepClientStub{})
c.Check(err, check.IsNil)
c.Logf("%s loaded", time.Now())
- c.Check(f.Size(), check.Equals, int64(42*dirCount*fileCount))
+ c.Check(f.Size(), check.Equals, int64(dirCount*filesPerDir*blocksPerFile*blksize))
+ // Stat() and OpenFile() each file. This mimics the behavior
+ // of webdav propfind, which opens each file even when just
+ // listing directory entries.
for i := 0; i < dirCount; i++ {
- for j := 0; j < fileCount; j++ {
- f.Stat(fmt.Sprintf("./dir%d/dir%d/file%d", i, j, j))
+ for j := 0; j < filesPerDir; j++ {
+ fnm := fmt.Sprintf("./dir%d/dir%d/file%d", i, j, j)
+ fi, err := f.Stat(fnm)
+ c.Assert(err, check.IsNil)
+ c.Check(fi.IsDir(), check.Equals, false)
+ f, err := f.OpenFile(fnm, os.O_RDONLY, 0)
+ c.Assert(err, check.IsNil)
+ f.Close()
}
}
- c.Logf("%s Stat() x %d", time.Now(), dirCount*fileCount)
+ c.Logf("%s OpenFile() x %d", time.Now(), dirCount*filesPerDir)
runtime.ReadMemStats(&memstats)
c.Logf("%s Alloc=%d Sys=%d", time.Now(), memstats.Alloc, memstats.Sys)
+ c.Logf("%s MemorySize=%d", time.Now(), f.MemorySize())
+ c.Logf("%s ... test duration %s", time.Now(), time.Now().Sub(t0))
}
// Gocheck boilerplate
diff --git a/sdk/go/arvados/fs_deferred.go b/sdk/go/arvados/fs_deferred.go
index 1dfa2df6e4..e85446098f 100644
--- a/sdk/go/arvados/fs_deferred.go
+++ b/sdk/go/arvados/fs_deferred.go
@@ -5,45 +5,10 @@
package arvados
import (
- "log"
"os"
"sync"
- "time"
)
-func deferredCollectionFS(fs FileSystem, parent inode, coll Collection) inode {
- modTime := coll.ModifiedAt
- if modTime.IsZero() {
- modTime = time.Now()
- }
- placeholder := &treenode{
- fs: fs,
- parent: parent,
- inodes: nil,
- fileinfo: fileinfo{
- name: coll.Name,
- modTime: modTime,
- mode: 0755 | os.ModeDir,
- sys: func() interface{} { return &coll },
- },
- }
- return &deferrednode{wrapped: placeholder, create: func() inode {
- err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
- if err != nil {
- log.Printf("BUG: unhandled error: %s", err)
- return placeholder
- }
- newfs, err := coll.FileSystem(fs, fs)
- if err != nil {
- log.Printf("BUG: unhandled error: %s", err)
- return placeholder
- }
- cfs := newfs.(*collectionFileSystem)
- cfs.SetParent(parent, coll.Name)
- return cfs
- }}
-}
-
// A deferrednode wraps an inode that's expensive to build. Initially,
// it responds to basic directory functions by proxying to the given
// placeholder. If a caller uses a read/write/lock operation,
diff --git a/sdk/go/arvados/fs_lookup.go b/sdk/go/arvados/fs_lookup.go
index 2bb09995e1..7f22449318 100644
--- a/sdk/go/arvados/fs_lookup.go
+++ b/sdk/go/arvados/fs_lookup.go
@@ -48,7 +48,19 @@ func (ln *lookupnode) Readdir() ([]os.FileInfo, error) {
return nil, err
}
for _, child := range all {
- _, err = ln.treenode.Child(child.FileInfo().Name(), func(inode) (inode, error) {
+ var name string
+ if hl, ok := child.(*hardlink); ok && hl.inode == ln {
+ // If child is a hardlink to its
+ // parent, FileInfo()->RLock() will
+ // deadlock, because we already have
+ // the write lock. In this situation
+ // we can safely access the hardlink's
+ // name directly.
+ name = hl.name
+ } else {
+ name = child.FileInfo().Name()
+ }
+ _, err = ln.treenode.Child(name, func(inode) (inode, error) {
return child, nil
})
if err != nil {
diff --git a/sdk/go/arvados/fs_project.go b/sdk/go/arvados/fs_project.go
index bea1f76e24..df1d06e753 100644
--- a/sdk/go/arvados/fs_project.go
+++ b/sdk/go/arvados/fs_project.go
@@ -6,7 +6,9 @@ package arvados
import (
"log"
+ "os"
"strings"
+ "time"
)
func (fs *customFileSystem) defaultUUID(uuid string) (string, error) {
@@ -33,10 +35,11 @@ func (fs *customFileSystem) projectsLoadOne(parent inode, uuid, name string) (in
contents = CollectionList{}
err = fs.RequestAndDecode(&contents, "GET", "arvados/v1/groups/"+uuid+"/contents", nil, ResourceListParams{
Count: "none",
+ Order: "uuid",
Filters: []Filter{
{"name", "=", strings.Replace(name, subst, "/", -1)},
{"uuid", "is_a", []string{"arvados#collection", "arvados#group"}},
- {"groups.group_class", "=", "project"},
+ {"groups.group_class", "in", []string{"project", "filter"}},
},
Select: []string{"uuid", "name", "modified_at", "properties"},
})
@@ -64,9 +67,18 @@ func (fs *customFileSystem) projectsLoadOne(parent inode, uuid, name string) (in
if strings.Contains(coll.UUID, "-j7d0g-") {
// Group item was loaded into a Collection var -- but
// we only need the Name and UUID anyway, so it's OK.
- return fs.newProjectNode(parent, coll.Name, coll.UUID, nil), nil
+ return &hardlink{
+ inode: fs.projectSingleton(coll.UUID, &Group{
+ UUID: coll.UUID,
+ Name: coll.Name,
+ ModifiedAt: coll.ModifiedAt,
+ Properties: coll.Properties,
+ }),
+ parent: parent,
+ name: coll.Name,
+ }, nil
} else if strings.Contains(coll.UUID, "-4zz18-") {
- return deferredCollectionFS(fs, parent, coll), nil
+ return fs.newDeferredCollectionDir(parent, name, coll.UUID, coll.ModifiedAt, coll.Properties), nil
} else {
log.Printf("group contents: unrecognized UUID in response: %q", coll.UUID)
return nil, ErrInvalidArgument
@@ -79,6 +91,7 @@ func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode,
return nil, err
}
+ pagesize := 100000
var inodes []inode
// When #17424 is resolved, remove the outer loop here and use
@@ -92,7 +105,7 @@ func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode,
{"uuid", "is_a", class},
}
if class == "arvados#group" {
- filters = append(filters, Filter{"group_class", "=", "project"})
+ filters = append(filters, Filter{"groups.group_class", "in", []string{"project", "filter"}})
}
params := ResourceListParams{
@@ -100,13 +113,18 @@ func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode,
Filters: filters,
Order: "uuid",
Select: []string{"uuid", "name", "modified_at", "properties"},
+ Limit: &pagesize,
}
for {
- // The groups content endpoint returns Collection and Group (project)
- // objects. This function only accesses the UUID and Name field. Both
- // collections and groups have those fields, so it is easier to just treat
- // the ObjectList that comes back as a CollectionList.
+ // The groups content endpoint returns
+ // Collection and Group (project)
+ // objects. This function only accesses the
+ // UUID, Name, and ModifiedAt fields. Both
+ // collections and groups have those fields,
+ // so it is easier to just treat the
+ // ObjectList that comes back as a
+ // CollectionList.
var resp CollectionList
err = fs.RequestAndDecode(&resp, "GET", "arvados/v1/groups/"+uuid+"/contents", nil, params)
if err != nil {
@@ -123,14 +141,14 @@ func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode,
continue
}
if strings.Contains(i.UUID, "-j7d0g-") {
- inodes = append(inodes, fs.newProjectNode(parent, i.Name, i.UUID, &Group{
+ inodes = append(inodes, fs.newProjectDir(parent, i.Name, i.UUID, &Group{
UUID: i.UUID,
Name: i.Name,
ModifiedAt: i.ModifiedAt,
Properties: i.Properties,
}))
} else if strings.Contains(i.UUID, "-4zz18-") {
- inodes = append(inodes, deferredCollectionFS(fs, parent, i))
+ inodes = append(inodes, fs.newDeferredCollectionDir(parent, i.Name, i.UUID, i.ModifiedAt, i.Properties))
} else {
log.Printf("group contents: unrecognized UUID in response: %q", i.UUID)
return nil, ErrInvalidArgument
@@ -141,3 +159,32 @@ func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode,
}
return inodes, nil
}
+
+func (fs *customFileSystem) newProjectDir(parent inode, name, uuid string, proj *Group) inode {
+ return &hardlink{inode: fs.projectSingleton(uuid, proj), parent: parent, name: name}
+}
+
+func (fs *customFileSystem) newDeferredCollectionDir(parent inode, name, uuid string, modTime time.Time, props map[string]interface{}) inode {
+ if modTime.IsZero() {
+ modTime = time.Now()
+ }
+ placeholder := &treenode{
+ fs: fs,
+ parent: parent,
+ inodes: nil,
+ fileinfo: fileinfo{
+ name: name,
+ modTime: modTime,
+ mode: 0755 | os.ModeDir,
+ sys: func() interface{} { return &Collection{UUID: uuid, Name: name, ModifiedAt: modTime, Properties: props} },
+ },
+ }
+ return &deferrednode{wrapped: placeholder, create: func() inode {
+ node, err := fs.collectionSingleton(uuid)
+ if err != nil {
+ log.Printf("BUG: unhandled error: %s", err)
+ return placeholder
+ }
+ return &hardlink{inode: node, parent: parent, name: name}
+ }}
+}
diff --git a/sdk/go/arvados/fs_project_test.go b/sdk/go/arvados/fs_project_test.go
index 8e7f588156..5c2eb33d12 100644
--- a/sdk/go/arvados/fs_project_test.go
+++ b/sdk/go/arvados/fs_project_test.go
@@ -10,7 +10,6 @@ import (
"errors"
"io"
"os"
- "path/filepath"
"strings"
check "gopkg.in/check.v1"
@@ -43,73 +42,108 @@ func (sc *spyingClient) RequestAndDecode(dst interface{}, method, path string, b
func (s *SiteFSSuite) TestFilterGroup(c *check.C) {
// Make sure that a collection and group that match the filter are present,
// and that a group that does not match the filter is not present.
- s.fs.MountProject("fg", fixtureThisFilterGroupUUID)
- _, err := s.fs.OpenFile("/fg/baz_file", 0, 0)
- c.Assert(err, check.IsNil)
+ checkOpen := func(path string, exists bool) {
+ f, err := s.fs.Open(path)
+ if exists {
+ if c.Check(err, check.IsNil) {
+ c.Check(f.Close(), check.IsNil)
+ }
+ } else {
+ c.Check(err, check.Equals, os.ErrNotExist)
+ }
+ }
- _, err = s.fs.OpenFile("/fg/A Subproject", 0, 0)
- c.Assert(err, check.IsNil)
+ checkDirContains := func(parent, child string, exists bool) {
+ f, err := s.fs.Open(parent)
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ ents, err := f.Readdir(-1)
+ if !c.Check(err, check.IsNil) {
+ return
+ }
+ for _, ent := range ents {
+ if !exists {
+ c.Check(ent.Name(), check.Not(check.Equals), child)
+ if child == "" {
+ // no children are expected
+ c.Errorf("child %q found in parent %q", child, parent)
+ }
+ } else if ent.Name() == child {
+ return
+ }
+ }
+ if exists {
+ c.Errorf("child %q not found in parent %q", child, parent)
+ }
+ }
- _, err = s.fs.OpenFile("/fg/A Project", 0, 0)
- c.Assert(err, check.Not(check.IsNil))
+ checkOpen("/users/active/This filter group/baz_file", true)
+ checkOpen("/users/active/This filter group/A Subproject", true)
+ checkOpen("/users/active/This filter group/A Project", false)
+ s.fs.MountProject("fg", fixtureThisFilterGroupUUID)
+ checkOpen("/fg/baz_file", true)
+ checkOpen("/fg/A Subproject", true)
+ checkOpen("/fg/A Project", false)
+ s.fs.MountProject("home", "")
+ checkOpen("/home/A filter group with an is_a collection filter/baz_file", true)
+ checkOpen("/home/A filter group with an is_a collection filter/baz_file/baz", true)
+ checkOpen("/home/A filter group with an is_a collection filter/A Subproject", false)
+ checkOpen("/home/A filter group with an is_a collection filter/A Project", false)
// An empty filter means everything that is visible should be returned.
+ checkOpen("/users/active/A filter group without filters/baz_file", true)
+ checkOpen("/users/active/A filter group without filters/A Subproject", true)
+ checkOpen("/users/active/A filter group without filters/A Project", true)
s.fs.MountProject("fg2", fixtureAFilterGroupTwoUUID)
+ checkOpen("/fg2/baz_file", true)
+ checkOpen("/fg2/A Subproject", true)
+ checkOpen("/fg2/A Project", true)
- _, err = s.fs.OpenFile("/fg2/baz_file", 0, 0)
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.OpenFile("/fg2/A Subproject", 0, 0)
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.OpenFile("/fg2/A Project", 0, 0)
- c.Assert(err, check.IsNil)
+ // If a filter group matches itself or one of its ancestors,
+ // the matched item appears as an empty directory.
+ checkDirContains("/users/active/A filter group without filters", "A filter group without filters", true)
+ checkOpen("/users/active/A filter group without filters/A filter group without filters", true)
+ checkOpen("/users/active/A filter group without filters/A filter group without filters/baz_file", false)
+ checkDirContains("/users/active/A filter group without filters/A filter group without filters", "", false)
// An 'is_a' 'arvados#collection' filter means only collections should be returned.
+ checkOpen("/users/active/A filter group with an is_a collection filter/baz_file", true)
+ checkOpen("/users/active/A filter group with an is_a collection filter/baz_file/baz", true)
+ checkOpen("/users/active/A filter group with an is_a collection filter/A Subproject", false)
+ checkOpen("/users/active/A filter group with an is_a collection filter/A Project", false)
s.fs.MountProject("fg3", fixtureAFilterGroupThreeUUID)
-
- _, err = s.fs.OpenFile("/fg3/baz_file", 0, 0)
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.OpenFile("/fg3/A Subproject", 0, 0)
- c.Assert(err, check.Not(check.IsNil))
+ checkOpen("/fg3/baz_file", true)
+ checkOpen("/fg3/baz_file/baz", true)
+ checkOpen("/fg3/A Subproject", false)
// An 'exists' 'arvados#collection' filter means only collections with certain properties should be returned.
s.fs.MountProject("fg4", fixtureAFilterGroupFourUUID)
-
- _, err = s.fs.Stat("/fg4/collection with list property with odd values")
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.Stat("/fg4/collection with list property with even values")
- c.Assert(err, check.IsNil)
+ checkOpen("/fg4/collection with list property with odd values", true)
+ checkOpen("/fg4/collection with list property with even values", true)
+ checkOpen("/fg4/baz_file", false)
// A 'contains' 'arvados#collection' filter means only collections with certain properties should be returned.
s.fs.MountProject("fg5", fixtureAFilterGroupFiveUUID)
-
- _, err = s.fs.Stat("/fg5/collection with list property with odd values")
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.Stat("/fg5/collection with list property with string value")
- c.Assert(err, check.IsNil)
-
- _, err = s.fs.Stat("/fg5/collection with prop2 5")
- c.Assert(err, check.Not(check.IsNil))
-
- _, err = s.fs.Stat("/fg5/collection with list property with even values")
- c.Assert(err, check.Not(check.IsNil))
+ checkOpen("/fg5/collection with list property with odd values", true)
+ checkOpen("/fg5/collection with list property with string value", true)
+ checkOpen("/fg5/collection with prop2 5", false)
+ checkOpen("/fg5/collection with list property with even values", false)
}
func (s *SiteFSSuite) TestCurrentUserHome(c *check.C) {
s.fs.MountProject("home", "")
- s.testHomeProject(c, "/home")
+ s.testHomeProject(c, "/home", "home")
}
func (s *SiteFSSuite) TestUsersDir(c *check.C) {
- s.testHomeProject(c, "/users/active")
+ // /users/active is a hardlink to a dir whose name is the UUID
+ // of the active user
+ s.testHomeProject(c, "/users/active", fixtureActiveUserUUID)
}
-func (s *SiteFSSuite) testHomeProject(c *check.C, path string) {
+func (s *SiteFSSuite) testHomeProject(c *check.C, path, expectRealName string) {
f, err := s.fs.Open(path)
c.Assert(err, check.IsNil)
fis, err := f.Readdir(-1)
@@ -130,8 +164,7 @@ func (s *SiteFSSuite) testHomeProject(c *check.C, path string) {
fi, err := f.Stat()
c.Assert(err, check.IsNil)
c.Check(fi.IsDir(), check.Equals, true)
- _, basename := filepath.Split(path)
- c.Check(fi.Name(), check.Equals, basename)
+ c.Check(fi.Name(), check.Equals, expectRealName)
f, err = s.fs.Open(path + "/A Project/A Subproject")
c.Assert(err, check.IsNil)
@@ -263,14 +296,10 @@ func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
err = project.Sync()
c.Check(err, check.IsNil)
- _, err = s.fs.Open("/home/A Project/oob/test.txt")
- c.Check(err, check.IsNil)
-
- // Sync again to mark the project dir as stale, so the
- // collection gets reloaded from the controller on next
- // lookup.
- err = project.Sync()
- c.Check(err, check.IsNil)
+ f, err = s.fs.Open("/home/A Project/oob/test.txt")
+ if c.Check(err, check.IsNil) {
+ f.Close()
+ }
// Ensure collection was flushed by Sync
var latest Collection
@@ -288,10 +317,17 @@ func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
})
c.Assert(err, check.IsNil)
+ // Sync again to reload collection.
+ err = project.Sync()
+ c.Check(err, check.IsNil)
+
+ // Check test.txt deletion is reflected in fs.
_, err = s.fs.Open("/home/A Project/oob/test.txt")
c.Check(err, check.NotNil)
- _, err = s.fs.Open("/home/A Project/oob")
- c.Check(err, check.IsNil)
+ f, err = s.fs.Open("/home/A Project/oob")
+ if c.Check(err, check.IsNil) {
+ f.Close()
+ }
err = s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
c.Assert(err, check.IsNil)
diff --git a/sdk/go/arvados/fs_site.go b/sdk/go/arvados/fs_site.go
index bb2eee7792..d4f0241682 100644
--- a/sdk/go/arvados/fs_site.go
+++ b/sdk/go/arvados/fs_site.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "net/http"
"os"
"strings"
"sync"
@@ -28,6 +29,10 @@ type customFileSystem struct {
staleLock sync.Mutex
forwardSlashNameSubstitution string
+
+ byID map[string]inode
+ byIDLock sync.Mutex
+ byIDRoot *treenode
}
func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
@@ -50,6 +55,17 @@ func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
},
inodes: make(map[string]inode),
}
+ fs.byID = map[string]inode{}
+ fs.byIDRoot = &treenode{
+ fs: fs,
+ parent: root,
+ inodes: make(map[string]inode),
+ fileinfo: fileinfo{
+ name: "_internal_by_id",
+ modTime: time.Now(),
+ mode: 0755 | os.ModeDir,
+ },
+ }
return fs
}
@@ -68,7 +84,7 @@ func (fs *customFileSystem) MountByID(mount string) {
mode: 0755 | os.ModeDir,
},
},
- create: fs.mountByID,
+ create: fs.newCollectionOrProjectHardlink,
}, nil
})
}
@@ -77,7 +93,7 @@ func (fs *customFileSystem) MountProject(mount, uuid string) {
fs.root.treenode.Lock()
defer fs.root.treenode.Unlock()
fs.root.treenode.Child(mount, func(inode) (inode, error) {
- return fs.newProjectNode(fs.root, mount, uuid, nil), nil
+ return fs.newProjectDir(fs.root, mount, uuid, nil), nil
})
}
@@ -107,6 +123,10 @@ func (fs *customFileSystem) ForwardSlashNameSubstitution(repl string) {
fs.forwardSlashNameSubstitution = repl
}
+func (fs *customFileSystem) MemorySize() int64 {
+ return fs.fileSystem.MemorySize() + fs.byIDRoot.MemorySize()
+}
+
// SiteFileSystem returns a FileSystem that maps collections and other
// Arvados objects onto a filesystem layout.
//
@@ -121,7 +141,7 @@ func (c *Client) SiteFileSystem(kc keepClient) CustomFileSystem {
}
func (fs *customFileSystem) Sync() error {
- return fs.root.Sync()
+ return fs.byIDRoot.Sync()
}
// Stale returns true if information obtained at time t should be
@@ -136,40 +156,58 @@ func (fs *customFileSystem) newNode(name string, perm os.FileMode, modTime time.
return nil, ErrInvalidOperation
}
-func (fs *customFileSystem) mountByID(parent inode, id string) inode {
+func (fs *customFileSystem) newCollectionOrProjectHardlink(parent inode, id string) (inode, error) {
if strings.Contains(id, "-4zz18-") || pdhRegexp.MatchString(id) {
- return fs.mountCollection(parent, id)
- } else if strings.Contains(id, "-j7d0g-") {
- return fs.newProjectNode(fs.root, id, id, nil)
+ node, err := fs.collectionSingleton(id)
+ if os.IsNotExist(err) {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &hardlink{inode: node, parent: parent, name: id}, nil
+ } else if strings.Contains(id, "-j7d0g-") || strings.Contains(id, "-tpzed-") {
+ fs.byIDLock.Lock()
+ node := fs.byID[id]
+ fs.byIDLock.Unlock()
+ if node == nil {
+ // Look up the project synchronously before
+ // calling projectSingleton (otherwise we
+ // wouldn't detect a nonexistent project until
+ // it's too late to return ErrNotExist).
+ proj, err := fs.getProject(id)
+ if os.IsNotExist(err) {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ node = fs.projectSingleton(id, proj)
+ }
+ return &hardlink{inode: node, parent: parent, name: id}, nil
} else {
- return nil
+ return nil, nil
}
}
-func (fs *customFileSystem) mountCollection(parent inode, id string) inode {
- var coll Collection
- err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, nil)
- if err != nil {
- return nil
+func (fs *customFileSystem) projectSingleton(uuid string, proj *Group) inode {
+ fs.byIDLock.Lock()
+ defer fs.byIDLock.Unlock()
+ if n := fs.byID[uuid]; n != nil {
+ return n
}
- newfs, err := coll.FileSystem(fs, fs)
- if err != nil {
- return nil
+ name := uuid
+ if name == "" {
+ // special case uuid=="" implements the "home project"
+ // (owner_uuid == current user uuid)
+ name = "home"
}
- cfs := newfs.(*collectionFileSystem)
- cfs.SetParent(parent, id)
- return cfs
-}
-
-func (fs *customFileSystem) newProjectNode(root inode, name, uuid string, proj *Group) inode {
var projLoading sync.Mutex
- return &lookupnode{
+ n := &lookupnode{
stale: fs.Stale,
loadOne: func(parent inode, name string) (inode, error) { return fs.projectsLoadOne(parent, uuid, name) },
loadAll: func(parent inode) ([]inode, error) { return fs.projectsLoadAll(parent, uuid) },
treenode: treenode{
fs: fs,
- parent: root,
+ parent: fs.byIDRoot,
inodes: make(map[string]inode),
fileinfo: fileinfo{
name: name,
@@ -181,17 +219,90 @@ func (fs *customFileSystem) newProjectNode(root inode, name, uuid string, proj *
if proj != nil {
return proj
}
- var g Group
- err := fs.RequestAndDecode(&g, "GET", "arvados/v1/groups/"+uuid, nil, nil)
+ g, err := fs.getProject(uuid)
if err != nil {
return err
}
- proj = &g
+ proj = g
return proj
},
},
},
}
+ fs.byID[uuid] = n
+ return n
+}
+
+func (fs *customFileSystem) getProject(uuid string) (*Group, error) {
+ var g Group
+ err := fs.RequestAndDecode(&g, "GET", "arvados/v1/groups/"+uuid, nil, nil)
+ if statusErr, ok := err.(interface{ HTTPStatus() int }); ok && statusErr.HTTPStatus() == http.StatusNotFound {
+ return nil, os.ErrNotExist
+ } else if err != nil {
+ return nil, err
+ }
+ return &g, err
+}
+
+func (fs *customFileSystem) collectionSingleton(id string) (inode, error) {
+ // Return existing singleton, if we have it
+ fs.byIDLock.Lock()
+ existing := fs.byID[id]
+ fs.byIDLock.Unlock()
+ if existing != nil {
+ return existing, nil
+ }
+
+ coll, err := fs.getCollection(id)
+ if err != nil {
+ return nil, err
+ }
+ newfs, err := coll.FileSystem(fs, fs)
+ if err != nil {
+ return nil, err
+ }
+ cfs := newfs.(*collectionFileSystem)
+ cfs.SetParent(fs.byIDRoot, id)
+
+ // Check again in case another goroutine has added a node to
+ // fs.byID since we checked above.
+ fs.byIDLock.Lock()
+ defer fs.byIDLock.Unlock()
+ if existing = fs.byID[id]; existing != nil {
+ // Other goroutine won the race. Discard the node we
+ // just made, and return the race winner.
+ return existing, nil
+ }
+ // We won the race. Save the new node in fs.byID and
+ // fs.byIDRoot.
+ fs.byID[id] = cfs
+ fs.byIDRoot.Lock()
+ defer fs.byIDRoot.Unlock()
+ fs.byIDRoot.Child(id, func(inode) (inode, error) { return cfs, nil })
+ return cfs, nil
+}
+
+func (fs *customFileSystem) getCollection(id string) (*Collection, error) {
+ var coll Collection
+ err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, nil)
+ if statusErr, ok := err.(interface{ HTTPStatus() int }); ok && statusErr.HTTPStatus() == http.StatusNotFound {
+ return nil, os.ErrNotExist
+ } else if err != nil {
+ return nil, err
+ }
+ if len(id) != 27 {
+ // This means id is a PDH, and controller/railsapi
+ // returned one of (possibly) many collections with
+ // that PDH. Even if controller returns more fields
+ // besides PDH and manifest text (which are equal for
+ // all matching collections), we don't want to expose
+ // them (e.g., through Sys()).
+ coll = Collection{
+ PortableDataHash: coll.PortableDataHash,
+ ManifestText: coll.ManifestText,
+ }
+ }
+ return &coll, nil
}
// vdirnode wraps an inode by rejecting (with ErrInvalidOperation)
@@ -202,15 +313,19 @@ func (fs *customFileSystem) newProjectNode(root inode, name, uuid string, proj *
// treenode, or nil for ENOENT.
type vdirnode struct {
treenode
- create func(parent inode, name string) inode
+ create func(parent inode, name string) (inode, error)
}
func (vn *vdirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
return vn.treenode.Child(name, func(existing inode) (inode, error) {
if existing == nil && vn.create != nil {
- existing = vn.create(vn, name)
- if existing != nil {
- existing.SetParent(vn, name)
+ newnode, err := vn.create(vn, name)
+ if err != nil {
+ return nil, err
+ }
+ if newnode != nil {
+ newnode.SetParent(vn, name)
+ existing = newnode
vn.treenode.fileinfo.modTime = time.Now()
}
}
@@ -225,3 +340,57 @@ func (vn *vdirnode) Child(name string, replace func(inode) (inode, error)) (inod
}
})
}
+
+// A hardlink can be used to mount an existing node at an additional
+// point in the same filesystem.
+type hardlink struct {
+ inode
+ parent inode
+ name string
+}
+
+// If the wrapped inode is a filesystem, rootnode returns the wrapped
+// fs's rootnode, otherwise inode itself. This allows
+// (*fileSystem)Rename() to lock the root node of a hardlink-wrapped
+// filesystem.
+func (hl *hardlink) rootnode() inode {
+ if node, ok := hl.inode.(interface{ rootnode() inode }); ok {
+ return node.rootnode()
+ } else {
+ return hl.inode
+ }
+}
+
+func (hl *hardlink) Sync() error {
+ if node, ok := hl.inode.(syncer); ok {
+ return node.Sync()
+ } else {
+ return ErrInvalidOperation
+ }
+}
+
+func (hl *hardlink) SetParent(parent inode, name string) {
+ hl.Lock()
+ defer hl.Unlock()
+ hl.parent = parent
+ hl.name = name
+}
+
+func (hl *hardlink) Parent() inode {
+ hl.RLock()
+ defer hl.RUnlock()
+ return hl.parent
+}
+
+func (hl *hardlink) FileInfo() os.FileInfo {
+ fi := hl.inode.FileInfo()
+ if fi, ok := fi.(fileinfo); ok {
+ fi.name = hl.name
+ return fi
+ }
+ return fi
+}
+
+func (hl *hardlink) MemorySize() int64 {
+ return 64 + int64(len(hl.name))
+}
diff --git a/sdk/go/arvados/fs_site_test.go b/sdk/go/arvados/fs_site_test.go
index 3abe2b457f..2c86536b2f 100644
--- a/sdk/go/arvados/fs_site_test.go
+++ b/sdk/go/arvados/fs_site_test.go
@@ -22,6 +22,7 @@ const (
// Importing arvadostest would be an import cycle, so these
// fixtures are duplicated here [until fs moves to a separate
// package].
+ fixtureActiveUserUUID = "zzzzz-tpzed-xurymjxw79nv3jz"
fixtureActiveToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
fixtureAProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso"
fixtureThisFilterGroupUUID = "zzzzz-j7d0g-thisfiltergroup"
@@ -97,6 +98,55 @@ func (s *SiteFSSuite) TestUpdateStorageClasses(c *check.C) {
c.Assert(err, check.ErrorMatches, `.*stub does not write storage class "archive"`)
}
+func (s *SiteFSSuite) TestSameCollectionDifferentPaths(c *check.C) {
+ s.fs.MountProject("home", "")
+ var coll Collection
+ err := s.client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]interface{}{
+ "owner_uuid": fixtureAProjectUUID,
+ "name": fmt.Sprintf("test collection %d", time.Now().UnixNano()),
+ },
+ })
+ c.Assert(err, check.IsNil)
+
+ viaProjID := "by_id/" + fixtureAProjectUUID + "/" + coll.Name
+ viaProjName := "home/A Project/" + coll.Name
+ viaCollID := "by_id/" + coll.UUID
+ for n, dirs := range [][]string{
+ {viaCollID, viaProjID, viaProjName},
+ {viaCollID, viaProjName, viaProjID},
+ {viaProjID, viaProjName, viaCollID},
+ {viaProjID, viaCollID, viaProjName},
+ {viaProjName, viaCollID, viaProjID},
+ {viaProjName, viaProjID, viaCollID},
+ } {
+ filename := fmt.Sprintf("file %d", n)
+ f := make([]File, 3)
+ for i, dir := range dirs {
+ path := dir + "/" + filename
+ mode := os.O_RDWR
+ if i == 0 {
+ mode |= os.O_CREATE
+ c.Logf("create %s", path)
+ } else {
+ c.Logf("open %s", path)
+ }
+ f[i], err = s.fs.OpenFile(path, mode, 0777)
+ c.Assert(err, check.IsNil, check.Commentf("n=%d i=%d path=%s", n, i, path))
+ defer f[i].Close()
+ }
+ _, err = io.WriteString(f[0], filename)
+ c.Assert(err, check.IsNil)
+ _, err = f[1].Seek(0, io.SeekEnd)
+ c.Assert(err, check.IsNil)
+ _, err = io.WriteString(f[1], filename)
+ c.Assert(err, check.IsNil)
+ buf, err := io.ReadAll(f[2])
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Equals, filename+filename)
+ }
+}
+
func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
f, err := s.fs.Open("/by_id")
c.Assert(err, check.IsNil)
@@ -135,6 +185,16 @@ func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
names = append(names, fi.Name())
}
c.Check(names, check.DeepEquals, []string{"baz"})
+ f, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file/baz")
+ c.Assert(err, check.IsNil)
+ err = f.Close()
+ c.Assert(err, check.IsNil)
+ _, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file/baz/")
+ c.Assert(err, check.Equals, ErrNotADirectory)
+ _, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file/baz/z")
+ c.Assert(err, check.Equals, ErrNotADirectory)
+ _, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file/baz/..")
+ c.Assert(err, check.Equals, ErrNotADirectory)
_, err = s.fs.OpenFile("/by_id/"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
c.Check(err, ErrorIs, ErrInvalidOperation)
diff --git a/sdk/go/arvados/fs_users.go b/sdk/go/arvados/fs_users.go
index ae47414b7a..5f9edb40fd 100644
--- a/sdk/go/arvados/fs_users.go
+++ b/sdk/go/arvados/fs_users.go
@@ -20,7 +20,7 @@ func (fs *customFileSystem) usersLoadOne(parent inode, name string) (inode, erro
return nil, os.ErrNotExist
}
user := resp.Items[0]
- return fs.newProjectNode(parent, user.Username, user.UUID, nil), nil
+ return fs.newProjectDir(parent, user.Username, user.UUID, nil), nil
}
func (fs *customFileSystem) usersLoadAll(parent inode) ([]inode, error) {
@@ -41,7 +41,7 @@ func (fs *customFileSystem) usersLoadAll(parent inode) ([]inode, error) {
if user.Username == "" {
continue
}
- inodes = append(inodes, fs.newProjectNode(parent, user.Username, user.UUID, nil))
+ inodes = append(inodes, fs.newProjectDir(parent, user.Username, user.UUID, nil))
}
params.Filters = []Filter{{"uuid", ">", resp.Items[len(resp.Items)-1].UUID}}
}
diff --git a/sdk/go/arvados/keep_cache.go b/sdk/go/arvados/keep_cache.go
new file mode 100644
index 0000000000..108081d5ac
--- /dev/null
+++ b/sdk/go/arvados/keep_cache.go
@@ -0,0 +1,744 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+type KeepGateway interface {
+ ReadAt(locator string, dst []byte, offset int) (int, error)
+ BlockRead(ctx context.Context, opts BlockReadOptions) (int, error)
+ BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error)
+ LocalLocator(locator string) (string, error)
+}
+
+// DiskCache wraps KeepGateway, adding a disk-based cache layer.
+//
+// A DiskCache is automatically incorporated into the backend stack of
+// each keepclient.KeepClient. Most programs do not need to use
+// DiskCache directly.
+type DiskCache struct {
+ KeepGateway
+ Dir string
+ MaxSize ByteSizeOrPercent
+ Logger logrus.FieldLogger
+
+ *sharedCache
+ setupOnce sync.Once
+}
+
+var (
+ sharedCachesLock sync.Mutex
+ sharedCaches = map[string]*sharedCache{}
+)
+
+// sharedCache has fields that coordinate the cache usage in a single
+// cache directory; it can be shared by multiple DiskCaches.
+//
+// This serves to share a single pool of held-open filehandles, a
+// single tidying goroutine, etc., even when the program (like
+// keep-web) uses multiple KeepGateway stacks that use different auth
+// tokens, etc.
+type sharedCache struct {
+ dir string
+ maxSize ByteSizeOrPercent
+
+ tidying int32 // see tidy()
+ defaultMaxSize int64
+
+ // The "heldopen" fields are used to open cache files for
+ // reading, and leave them open for future/concurrent ReadAt
+ // operations. See quickReadAt.
+ heldopen map[string]*openFileEnt
+ heldopenMax int
+ heldopenLock sync.Mutex
+
+ // The "writing" fields allow multiple concurrent/sequential
+ // ReadAt calls to be notified as a single
+ // read-block-from-backend-into-cache goroutine fills the
+ // cache file.
+ writing map[string]*writeprogress
+ writingCond *sync.Cond
+ writingLock sync.Mutex
+
+ sizeMeasured int64 // actual size on disk after last tidy(); zero if not measured yet
+ sizeEstimated int64 // last measured size, plus files we have written since
+ lastFileCount int64 // number of files on disk at last count
+ writesSinceTidy int64 // number of files written since last tidy()
+}
+
+type writeprogress struct {
+ cond *sync.Cond // broadcast whenever size or done changes
+ done bool // size and err have their final values
+ size int // bytes copied into cache file so far
+ err error // error encountered while copying from backend to cache
+ sharedf *os.File // readable filehandle, usable if done && err==nil
+ readers sync.WaitGroup // goroutines that haven't finished reading from f yet
+}
+
+type openFileEnt struct {
+ sync.RWMutex
+ f *os.File
+ err error // if err is non-nil, f should not be used.
+}
+
+const (
+ cacheFileSuffix = ".keepcacheblock"
+ tmpFileSuffix = ".tmp"
+)
+
+func (cache *DiskCache) setup() {
+ sharedCachesLock.Lock()
+ defer sharedCachesLock.Unlock()
+ dir := cache.Dir
+ if sharedCaches[dir] == nil {
+ sharedCaches[dir] = &sharedCache{dir: dir, maxSize: cache.MaxSize}
+ }
+ cache.sharedCache = sharedCaches[dir]
+}
+
+func (cache *DiskCache) cacheFile(locator string) string {
+ hash := locator
+ if i := strings.Index(hash, "+"); i > 0 {
+ hash = hash[:i]
+ }
+ return filepath.Join(cache.dir, hash[:3], hash+cacheFileSuffix)
+}
+
+// Open a cache file, creating the parent dir if necessary.
+func (cache *DiskCache) openFile(name string, flags int) (*os.File, error) {
+ f, err := os.OpenFile(name, flags, 0600)
+ if os.IsNotExist(err) {
+ // Create the parent dir and try again. (We could have
+ // checked/created the parent dir before, but that
+ // would be less efficient in the much more common
+ // situation where it already exists.)
+ parent, _ := filepath.Split(name)
+ os.Mkdir(parent, 0700)
+ f, err = os.OpenFile(name, flags, 0600)
+ }
+ return f, err
+}
+
+// Rename a file, creating the new path's parent dir if necessary.
+func (cache *DiskCache) rename(old, new string) error {
+ if nil == os.Rename(old, new) {
+ return nil
+ }
+ parent, _ := filepath.Split(new)
+ os.Mkdir(parent, 0700)
+ return os.Rename(old, new)
+}
+
+func (cache *DiskCache) debugf(format string, args ...interface{}) {
+ logger := cache.Logger
+ if logger == nil {
+ return
+ }
+ logger.Debugf(format, args...)
+}
+
+// BlockWrite writes through to the wrapped KeepGateway, and (if
+// possible) retains a copy of the written block in the cache.
+func (cache *DiskCache) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {
+ cache.setupOnce.Do(cache.setup)
+ unique := fmt.Sprintf("%x.%p%s", os.Getpid(), &opts, tmpFileSuffix)
+ tmpfilename := filepath.Join(cache.dir, "tmp", unique)
+ tmpfile, err := cache.openFile(tmpfilename, os.O_CREATE|os.O_EXCL|os.O_RDWR)
+ if err != nil {
+ cache.debugf("BlockWrite: open(%s) failed: %s", tmpfilename, err)
+ return cache.KeepGateway.BlockWrite(ctx, opts)
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ copyerr := make(chan error, 1)
+
+ // Start a goroutine to copy the caller's source data to
+ // tmpfile, a hash checker, and (via pipe) the wrapped
+ // KeepGateway.
+ pipereader, pipewriter := io.Pipe()
+ defer pipereader.Close()
+ go func() {
+ // Note this is a double-close (which is a no-op) in
+ // the happy path.
+ defer tmpfile.Close()
+ // Note this is a no-op in the happy path (the
+ // uniquely named tmpfilename will have been renamed).
+ defer os.Remove(tmpfilename)
+ defer pipewriter.Close()
+
+ // Copy from opts.Data or opts.Reader, depending on
+ // which was provided.
+ var src io.Reader
+ if opts.Data != nil {
+ src = bytes.NewReader(opts.Data)
+ } else {
+ src = opts.Reader
+ }
+
+ hashcheck := md5.New()
+ n, err := io.Copy(io.MultiWriter(tmpfile, pipewriter, hashcheck), src)
+ if err != nil {
+ copyerr <- err
+ cancel()
+ return
+ } else if opts.DataSize > 0 && opts.DataSize != int(n) {
+ copyerr <- fmt.Errorf("block size %d did not match provided size %d", n, opts.DataSize)
+ cancel()
+ return
+ }
+ err = tmpfile.Close()
+ if err != nil {
+ // Don't rename tmpfile into place, but allow
+ // the BlockWrite call to succeed if nothing
+ // else goes wrong.
+ return
+ }
+ hash := fmt.Sprintf("%x", hashcheck.Sum(nil))
+ if opts.Hash != "" && opts.Hash != hash {
+ // Even if the wrapped KeepGateway doesn't
+ // notice a problem, this should count as an
+ // error.
+ copyerr <- fmt.Errorf("block hash %s did not match provided hash %s", hash, opts.Hash)
+ cancel()
+ return
+ }
+ cachefilename := cache.cacheFile(hash)
+ err = cache.rename(tmpfilename, cachefilename)
+ if err != nil {
+ cache.debugf("BlockWrite: rename(%s, %s) failed: %s", tmpfilename, cachefilename, err)
+ }
+ atomic.AddInt64(&cache.sizeEstimated, int64(n))
+ cache.gotidy()
+ }()
+
+ // Write through to the wrapped KeepGateway from the pipe,
+ // instead of the original reader.
+ newopts := opts
+ if newopts.DataSize == 0 {
+ newopts.DataSize = len(newopts.Data)
+ }
+ newopts.Reader = pipereader
+ newopts.Data = nil
+
+ resp, err := cache.KeepGateway.BlockWrite(ctx, newopts)
+ if len(copyerr) > 0 {
+ // If the copy-to-pipe goroutine failed, that error
+ // will be more helpful than the resulting "context
+ // canceled" or "read [from pipereader] failed" error
+ // seen by the wrapped KeepGateway.
+ //
+ // If the wrapped KeepGateway encounters an error
+ // before all the data is copied into the pipe, it
+ // stops reading from the pipe, which causes the
+ // io.Copy() in the goroutine to block until our
+ // deferred pipereader.Close() call runs. In that case
+ // len(copyerr)==0 here, so the wrapped KeepGateway
+ // error is the one we return to our caller.
+ err = <-copyerr
+ }
+ return resp, err
+}
+
+type funcwriter func([]byte) (int, error)
+
+func (fw funcwriter) Write(p []byte) (int, error) {
+ return fw(p)
+}
+
+// ReadAt reads the entire block from the wrapped KeepGateway into the
+// cache if needed, and copies the requested portion into the provided
+// slice.
+//
+// ReadAt returns as soon as the requested portion is available in the
+// cache. The remainder of the block may continue to be copied into
+// the cache in the background.
+func (cache *DiskCache) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ cache.setupOnce.Do(cache.setup)
+ cachefilename := cache.cacheFile(locator)
+ if n, err := cache.quickReadAt(cachefilename, dst, offset); err == nil {
+ return n, nil
+ }
+
+ cache.writingLock.Lock()
+ progress := cache.writing[cachefilename]
+ if progress == nil {
+ // Nobody else is fetching from backend, so we'll add
+ // a new entry to cache.writing, fetch in a separate
+ // goroutine.
+ progress = &writeprogress{}
+ progress.cond = sync.NewCond(&sync.Mutex{})
+ if cache.writing == nil {
+ cache.writing = map[string]*writeprogress{}
+ }
+ cache.writing[cachefilename] = progress
+
+ // Start a goroutine to copy from backend to f. As
+ // data arrives, wake up any waiting loops (see below)
+ // so ReadAt() requests for partial data can return as
+ // soon as the relevant bytes have been copied.
+ go func() {
+ var size int
+ var err error
+ defer func() {
+ if err == nil && progress.sharedf != nil {
+ err = progress.sharedf.Sync()
+ }
+ progress.cond.L.Lock()
+ progress.err = err
+ progress.done = true
+ progress.size = size
+ progress.cond.L.Unlock()
+ progress.cond.Broadcast()
+ cache.writingLock.Lock()
+ delete(cache.writing, cachefilename)
+ cache.writingLock.Unlock()
+
+ // Wait for other goroutines to wake
+ // up, notice we're done, and use our
+ // sharedf to read their data, before
+ // we close sharedf.
+ //
+ // Nobody can join the WaitGroup after
+ // the progress entry is deleted from
+ // cache.writing above. Therefore,
+ // this Wait ensures nobody else is
+ // accessing progress, and we don't
+ // need to lock anything.
+ progress.readers.Wait()
+ progress.sharedf.Close()
+ }()
+ progress.sharedf, err = cache.openFile(cachefilename, os.O_CREATE|os.O_RDWR)
+ if err != nil {
+ err = fmt.Errorf("ReadAt: %w", err)
+ return
+ }
+ err = syscall.Flock(int(progress.sharedf.Fd()), syscall.LOCK_SH)
+ if err != nil {
+ err = fmt.Errorf("flock(%s, lock_sh) failed: %w", cachefilename, err)
+ return
+ }
+ size, err = cache.KeepGateway.BlockRead(context.Background(), BlockReadOptions{
+ Locator: locator,
+ WriteTo: funcwriter(func(p []byte) (int, error) {
+ n, err := progress.sharedf.Write(p)
+ if n > 0 {
+ progress.cond.L.Lock()
+ progress.size += n
+ progress.cond.L.Unlock()
+ progress.cond.Broadcast()
+ }
+ return n, err
+ })})
+ atomic.AddInt64(&cache.sizeEstimated, int64(size))
+ cache.gotidy()
+ }()
+ }
+ // We add ourselves to the readers WaitGroup so the
+ // fetch-from-backend goroutine doesn't close the shared
+ // filehandle before we read the data we need from it.
+ progress.readers.Add(1)
+ defer progress.readers.Done()
+ cache.writingLock.Unlock()
+
+ progress.cond.L.Lock()
+ for !progress.done && progress.size < len(dst)+offset {
+ progress.cond.Wait()
+ }
+ sharedf := progress.sharedf
+ err := progress.err
+ progress.cond.L.Unlock()
+
+ if err != nil {
+ // If the copy-from-backend goroutine encountered an
+ // error, we return that error. (Even if we read the
+ // desired number of bytes, the error might be
+ // something like BadChecksum so we should not ignore
+ // it.)
+ return 0, err
+ }
+ if len(dst) == 0 {
+ // It's possible that sharedf==nil here (the writer
+ // goroutine might not have done anything at all yet)
+ // and we don't need it anyway because no bytes are
+ // being read. Reading zero bytes seems pointless, but
+ // if someone does it, we might as well return
+ // suitable values, rather than risk a crash by
+ // calling sharedf.ReadAt() when sharedf is nil.
+ return 0, nil
+ }
+ return sharedf.ReadAt(dst, int64(offset))
+}
+
+var quickReadAtLostRace = errors.New("quickReadAt: lost race")
+
+// Remove the cache entry for the indicated cachefilename if it
+// matches expect (quickReadAt() usage), or if expect is nil (tidy()
+// usage).
+//
+// If expect is non-nil, close expect's filehandle.
+//
+// If expect is nil and a different cache entry is deleted, close its
+// filehandle.
+func (cache *DiskCache) deleteHeldopen(cachefilename string, expect *openFileEnt) {
+ needclose := expect
+
+ cache.heldopenLock.Lock()
+ found := cache.heldopen[cachefilename]
+ if found != nil && (expect == nil || expect == found) {
+ delete(cache.heldopen, cachefilename)
+ needclose = found
+ }
+ cache.heldopenLock.Unlock()
+
+ if needclose != nil {
+ needclose.Lock()
+ defer needclose.Unlock()
+ if needclose.f != nil {
+ needclose.f.Close()
+ needclose.f = nil
+ }
+ }
+}
+
+// quickReadAt attempts to use a cached-filehandle approach to read
+// from the indicated file. The expectation is that the caller
+// (ReadAt) will try a more robust approach when this fails, so
+// quickReadAt doesn't try especially hard to ensure success in
+// races. In particular, when there are concurrent calls, and one
+// fails, that can cause others to fail too.
+func (cache *DiskCache) quickReadAt(cachefilename string, dst []byte, offset int) (int, error) {
+ isnew := false
+ cache.heldopenLock.Lock()
+ if cache.heldopenMax == 0 {
+ // Choose a reasonable limit on open cache files based
+ // on RLIMIT_NOFILE. Note Go automatically raises
+ // softlimit to hardlimit, so it's typically 1048576,
+ // not 1024.
+ lim := syscall.Rlimit{}
+ err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
+ if err != nil {
+ cache.heldopenMax = 100
+ } else if lim.Cur > 400000 {
+ cache.heldopenMax = 10000
+ } else {
+ cache.heldopenMax = int(lim.Cur / 40)
+ }
+ }
+ heldopen := cache.heldopen[cachefilename]
+ if heldopen == nil {
+ isnew = true
+ heldopen = &openFileEnt{}
+ if cache.heldopen == nil {
+ cache.heldopen = make(map[string]*openFileEnt, cache.heldopenMax)
+ } else if len(cache.heldopen) > cache.heldopenMax {
+ // Rather than go to the trouble of tracking
+ // last access time, just close all files, and
+ // open again as needed. Even in the worst
+ // pathological case, this causes one extra
+ // open+close per read, which is not
+ // especially bad (see benchmarks).
+ go func(m map[string]*openFileEnt) {
+ for _, heldopen := range m {
+ heldopen.Lock()
+ defer heldopen.Unlock()
+ if heldopen.f != nil {
+ heldopen.f.Close()
+ heldopen.f = nil
+ }
+ }
+ }(cache.heldopen)
+ cache.heldopen = nil
+ }
+ cache.heldopen[cachefilename] = heldopen
+ heldopen.Lock()
+ }
+ cache.heldopenLock.Unlock()
+
+ if isnew {
+ // Open and flock the file, save the filehandle (or
+ // error) in heldopen.f, and release the write lock so
+ // other goroutines waiting at heldopen.RLock() below
+ // can use the shared filehandle (or shared error).
+ f, err := os.Open(cachefilename)
+ if err == nil {
+ err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
+ if err == nil {
+ heldopen.f = f
+ } else {
+ f.Close()
+ }
+ }
+ if err != nil {
+ heldopen.err = err
+ go cache.deleteHeldopen(cachefilename, heldopen)
+ }
+ heldopen.Unlock()
+ }
+ // Acquire read lock to ensure (1) initialization is complete,
+ // if it's done by a different goroutine, and (2) any "delete
+ // old/unused entries" waits for our read to finish before
+ // closing the file.
+ heldopen.RLock()
+ defer heldopen.RUnlock()
+ if heldopen.err != nil {
+ // Other goroutine encountered an error during setup
+ return 0, heldopen.err
+ } else if heldopen.f == nil {
+ // Other goroutine closed the file before we got RLock
+ return 0, quickReadAtLostRace
+ }
+
+ // If another goroutine is currently writing the file, wait
+ // for it to catch up to the end of the range we need.
+ cache.writingLock.Lock()
+ progress := cache.writing[cachefilename]
+ cache.writingLock.Unlock()
+ if progress != nil {
+ progress.cond.L.Lock()
+ for !progress.done && progress.size < len(dst)+offset {
+ progress.cond.Wait()
+ }
+ progress.cond.L.Unlock()
+		// If size<needed here, the fetch goroutine hit an
+		// error (or the block is shorter than the requested
+		// range); the ReadAt below will report it.
+	}
+	return heldopen.f.ReadAt(dst, int64(offset))
+}
+
+// BlockRead reads an entire block from the cache (fetching it from
+// the wrapped KeepGateway if needed) and writes it to opts.WriteTo.
+func (cache *DiskCache) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {
+	cache.setupOnce.Do(cache.setup)
+	i := strings.Index(opts.Locator, "+")
+	if i < 0 || i >= len(opts.Locator) {
+ return 0, errors.New("invalid block locator: no size hint")
+ }
+ sizestr := opts.Locator[i+1:]
+ i = strings.Index(sizestr, "+")
+ if i > 0 {
+ sizestr = sizestr[:i]
+ }
+ blocksize, err := strconv.ParseInt(sizestr, 10, 32)
+ if err != nil || blocksize < 0 {
+ return 0, errors.New("invalid block locator: invalid size hint")
+ }
+
+ offset := 0
+ buf := make([]byte, 131072)
+ for offset < int(blocksize) {
+ if ctx.Err() != nil {
+ return offset, ctx.Err()
+ }
+ if int(blocksize)-offset < len(buf) {
+ buf = buf[:int(blocksize)-offset]
+ }
+ nr, err := cache.ReadAt(opts.Locator, buf, offset)
+ if nr > 0 {
+ nw, err := opts.WriteTo.Write(buf[:nr])
+ if err != nil {
+ return offset + nw, err
+ }
+ }
+ offset += nr
+ if err != nil {
+ return offset, err
+ }
+ }
+ return offset, nil
+}
+
+// Start a tidy() goroutine, unless one is already running / recently
+// finished.
+func (cache *DiskCache) gotidy() {
+ writes := atomic.AddInt64(&cache.writesSinceTidy, 1)
+ // Skip if another tidy goroutine is running in this process.
+ n := atomic.AddInt32(&cache.tidying, 1)
+ if n != 1 {
+ atomic.AddInt32(&cache.tidying, -1)
+ return
+ }
+ // Skip if sizeEstimated is based on an actual measurement and
+ // is below maxSize, and we haven't done very many writes
+ // since last tidy (defined as 1% of number of cache files at
+ // last count).
+ if cache.sizeMeasured > 0 &&
+ atomic.LoadInt64(&cache.sizeEstimated) < atomic.LoadInt64(&cache.defaultMaxSize) &&
+ writes < cache.lastFileCount/100 {
+ atomic.AddInt32(&cache.tidying, -1)
+ return
+ }
+ go func() {
+ cache.tidy()
+ atomic.StoreInt64(&cache.writesSinceTidy, 0)
+ atomic.AddInt32(&cache.tidying, -1)
+ }()
+}
+
+// Delete cache files as needed to control disk usage.
+func (cache *DiskCache) tidy() {
+ maxsize := int64(cache.maxSize.ByteSize())
+ if maxsize < 1 {
+ maxsize = atomic.LoadInt64(&cache.defaultMaxSize)
+ if maxsize == 0 {
+ // defaultMaxSize not yet computed. Use 10% of
+ // filesystem capacity (or different
+ // percentage if indicated by cache.maxSize)
+ pct := cache.maxSize.Percent()
+ if pct == 0 {
+ pct = 10
+ }
+ var stat unix.Statfs_t
+ if nil == unix.Statfs(cache.dir, &stat) {
+ maxsize = int64(stat.Bavail) * stat.Bsize * pct / 100
+ atomic.StoreInt64(&cache.defaultMaxSize, maxsize)
+ } else {
+ // In this case we will set
+ // defaultMaxSize below after
+ // measuring current usage.
+ }
+ }
+ }
+
+ // Bail if a tidy goroutine is running in a different process.
+ lockfile, err := cache.openFile(filepath.Join(cache.dir, "tmp", "tidy.lock"), os.O_CREATE|os.O_WRONLY)
+ if err != nil {
+ return
+ }
+ defer lockfile.Close()
+ err = syscall.Flock(int(lockfile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+ if err != nil {
+ return
+ }
+
+ type entT struct {
+ path string
+ atime time.Time
+ size int64
+ }
+ var ents []entT
+ var totalsize int64
+ filepath.Walk(cache.dir, func(path string, info fs.FileInfo, err error) error {
+ if err != nil {
+ cache.debugf("tidy: skipping dir %s: %s", path, err)
+ return nil
+ }
+ if info.IsDir() {
+ return nil
+ }
+ if !strings.HasSuffix(path, cacheFileSuffix) && !strings.HasSuffix(path, tmpFileSuffix) {
+ return nil
+ }
+ var atime time.Time
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ // Access time is available (hopefully the
+ // filesystem is not mounted with noatime)
+ atime = time.Unix(stat.Atim.Sec, stat.Atim.Nsec)
+ } else {
+ // If access time isn't available we fall back
+ // to sorting by modification time.
+ atime = info.ModTime()
+ }
+ ents = append(ents, entT{path, atime, info.Size()})
+ totalsize += info.Size()
+ return nil
+ })
+ if cache.Logger != nil {
+ cache.Logger.WithFields(logrus.Fields{
+ "totalsize": totalsize,
+ "maxsize": maxsize,
+ }).Debugf("DiskCache: checked current cache usage")
+ }
+
+ // If MaxSize wasn't specified and we failed to come up with a
+ // defaultSize above, use the larger of {current cache size, 1
+ // GiB} as the defaultMaxSize for subsequent tidy()
+ // operations.
+ if maxsize == 0 {
+ if totalsize < 1<<30 {
+ atomic.StoreInt64(&cache.defaultMaxSize, 1<<30)
+ } else {
+ atomic.StoreInt64(&cache.defaultMaxSize, totalsize)
+ }
+ cache.debugf("found initial size %d, setting defaultMaxSize %d", totalsize, cache.defaultMaxSize)
+ return
+ }
+
+ // If we're below MaxSize or there's only one block in the
+ // cache, just update the usage estimate and return.
+ //
+ // (We never delete the last block because that would merely
+ // cause the same block to get re-fetched repeatedly from the
+ // backend.)
+ if totalsize <= maxsize || len(ents) == 1 {
+ atomic.StoreInt64(&cache.sizeMeasured, totalsize)
+ atomic.StoreInt64(&cache.sizeEstimated, totalsize)
+ cache.lastFileCount = int64(len(ents))
+ return
+ }
+
+ // Set a new size target of maxsize minus 5%. This makes some
+ // room for sizeEstimate to grow before it triggers another
+ // tidy. We don't want to walk/sort an entire large cache
+ // directory each time we write a block.
+ target := maxsize - (maxsize / 20)
+
+ // Delete oldest entries until totalsize < target or we're
+ // down to a single cached block.
+ sort.Slice(ents, func(i, j int) bool {
+ return ents[i].atime.Before(ents[j].atime)
+ })
+ deleted := 0
+ for _, ent := range ents {
+ os.Remove(ent.path)
+ go cache.deleteHeldopen(ent.path, nil)
+ deleted++
+ totalsize -= ent.size
+ if totalsize <= target || deleted == len(ents)-1 {
+ break
+ }
+ }
+
+ if cache.Logger != nil {
+ cache.Logger.WithFields(logrus.Fields{
+ "deleted": deleted,
+ "totalsize": totalsize,
+ }).Debugf("DiskCache: remaining cache usage after deleting")
+ }
+ atomic.StoreInt64(&cache.sizeMeasured, totalsize)
+ atomic.StoreInt64(&cache.sizeEstimated, totalsize)
+ cache.lastFileCount = int64(len(ents) - deleted)
+}
diff --git a/sdk/go/arvados/keep_cache_test.go b/sdk/go/arvados/keep_cache_test.go
new file mode 100644
index 0000000000..776d9bb652
--- /dev/null
+++ b/sdk/go/arvados/keep_cache_test.go
@@ -0,0 +1,464 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&keepCacheSuite{})
+
+type keepCacheSuite struct {
+}
+
+type keepGatewayBlackHole struct {
+}
+
+func (*keepGatewayBlackHole) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ return 0, errors.New("block not found")
+}
+func (*keepGatewayBlackHole) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {
+ return 0, errors.New("block not found")
+}
+func (*keepGatewayBlackHole) LocalLocator(locator string) (string, error) {
+ return locator, nil
+}
+func (*keepGatewayBlackHole) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {
+ h := md5.New()
+ var size int64
+ if opts.Reader == nil {
+ size, _ = io.Copy(h, bytes.NewReader(opts.Data))
+ } else {
+ size, _ = io.Copy(h, opts.Reader)
+ }
+ return BlockWriteResponse{Locator: fmt.Sprintf("%x+%d", h.Sum(nil), size), Replicas: 1}, nil
+}
+
+type keepGatewayMemoryBacked struct {
+ mtx sync.RWMutex
+ data map[string][]byte
+ pauseBlockReadAfter int
+ pauseBlockReadUntil chan error
+}
+
+func (k *keepGatewayMemoryBacked) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ k.mtx.RLock()
+ data := k.data[locator]
+ k.mtx.RUnlock()
+ if data == nil {
+ return 0, errors.New("block not found: " + locator)
+ }
+ var n int
+ if len(data) > offset {
+ n = copy(dst, data[offset:])
+ }
+ if n < len(dst) {
+ return n, io.EOF
+ }
+ return n, nil
+}
+func (k *keepGatewayMemoryBacked) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {
+ k.mtx.RLock()
+ data := k.data[opts.Locator]
+ k.mtx.RUnlock()
+ if data == nil {
+ return 0, errors.New("block not found: " + opts.Locator)
+ }
+ if k.pauseBlockReadUntil != nil {
+ src := bytes.NewReader(data)
+ n, err := io.CopyN(opts.WriteTo, src, int64(k.pauseBlockReadAfter))
+ if err != nil {
+ return int(n), err
+ }
+ <-k.pauseBlockReadUntil
+ n2, err := io.Copy(opts.WriteTo, src)
+ return int(n + n2), err
+ }
+ return opts.WriteTo.Write(data)
+}
+func (k *keepGatewayMemoryBacked) LocalLocator(locator string) (string, error) {
+ return locator, nil
+}
+func (k *keepGatewayMemoryBacked) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {
+ h := md5.New()
+ data := bytes.NewBuffer(nil)
+ if opts.Reader == nil {
+ data.Write(opts.Data)
+ h.Write(data.Bytes())
+ } else {
+ io.Copy(io.MultiWriter(h, data), opts.Reader)
+ }
+ locator := fmt.Sprintf("%x+%d", h.Sum(nil), data.Len())
+ k.mtx.Lock()
+ if k.data == nil {
+ k.data = map[string][]byte{}
+ }
+ k.data[locator] = data.Bytes()
+ k.mtx.Unlock()
+ return BlockWriteResponse{Locator: locator, Replicas: 1}, nil
+}
+
+func (s *keepCacheSuite) TestBlockWrite(c *check.C) {
+ backend := &keepGatewayMemoryBacked{}
+ cache := DiskCache{
+ KeepGateway: backend,
+ MaxSize: 40000000,
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ ctx := context.Background()
+ real, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: make([]byte, 100000),
+ })
+ c.Assert(err, check.IsNil)
+
+ // Write different data but supply the same hash. Should be
+ // rejected (even though our fake backend doesn't notice).
+ _, err = cache.BlockWrite(ctx, BlockWriteOptions{
+ Hash: real.Locator[:32],
+ Data: make([]byte, 10),
+ })
+ c.Check(err, check.ErrorMatches, `block hash .+ did not match provided hash .+`)
+
+ // Ensure the bogus write didn't overwrite (or delete) the
+ // real cached data associated with that hash.
+ delete(backend.data, real.Locator)
+ n, err := cache.ReadAt(real.Locator, make([]byte, 100), 0)
+ c.Check(n, check.Equals, 100)
+ c.Check(err, check.IsNil)
+}
+
+func (s *keepCacheSuite) TestMaxSize(c *check.C) {
+ backend := &keepGatewayMemoryBacked{}
+ cache := DiskCache{
+ KeepGateway: backend,
+ MaxSize: 40000000,
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ ctx := context.Background()
+ resp1, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: make([]byte, 44000000),
+ })
+ c.Check(err, check.IsNil)
+
+ // Wait for tidy to finish, check that it doesn't delete the
+ // only block.
+ time.Sleep(time.Millisecond)
+ for atomic.LoadInt32(&cache.tidying) > 0 {
+ time.Sleep(time.Millisecond)
+ }
+ c.Check(atomic.LoadInt64(&cache.sizeMeasured), check.Equals, int64(44000000))
+
+ resp2, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: make([]byte, 32000000),
+ })
+ c.Check(err, check.IsNil)
+ delete(backend.data, resp1.Locator)
+ delete(backend.data, resp2.Locator)
+
+ // Wait for tidy to finish, check that it deleted the older
+ // block.
+ time.Sleep(time.Millisecond)
+ for atomic.LoadInt32(&cache.tidying) > 0 {
+ time.Sleep(time.Millisecond)
+ }
+ c.Check(atomic.LoadInt64(&cache.sizeMeasured), check.Equals, int64(32000000))
+
+ n, err := cache.ReadAt(resp1.Locator, make([]byte, 2), 0)
+ c.Check(n, check.Equals, 0)
+ c.Check(err, check.ErrorMatches, `block not found: .*\+44000000`)
+
+ n, err = cache.ReadAt(resp2.Locator, make([]byte, 2), 0)
+ c.Check(n > 0, check.Equals, true)
+ c.Check(err, check.IsNil)
+}
+
+func (s *keepCacheSuite) TestConcurrentReadersNoRefresh(c *check.C) {
+ s.testConcurrentReaders(c, true, false)
+}
+func (s *keepCacheSuite) TestConcurrentReadersMangleCache(c *check.C) {
+ s.testConcurrentReaders(c, false, true)
+}
+func (s *keepCacheSuite) testConcurrentReaders(c *check.C, cannotRefresh, mangleCache bool) {
+ blksize := 64000000
+ backend := &keepGatewayMemoryBacked{}
+ cache := DiskCache{
+ KeepGateway: backend,
+ MaxSize: ByteSizeOrPercent(blksize),
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resp, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: make([]byte, blksize),
+ })
+ c.Check(err, check.IsNil)
+ if cannotRefresh {
+ // Delete the block from the backing store, to ensure
+ // the cache doesn't rely on re-reading a block that
+ // it has just written.
+ delete(backend.data, resp.Locator)
+ }
+ if mangleCache {
+ // Replace cache files with truncated files (and
+ // delete them outright) while the ReadAt loop is
+ // running, to ensure the cache can re-fetch from the
+ // backend as needed.
+ var nRemove, nTrunc int
+ defer func() {
+ c.Logf("nRemove %d", nRemove)
+ c.Logf("nTrunc %d", nTrunc)
+ }()
+ go func() {
+ // Truncate/delete the cache file at various
+ // intervals. Readers should re-fetch/recover from
+ // this.
+ fnm := cache.cacheFile(resp.Locator)
+ for ctx.Err() == nil {
+ trunclen := rand.Int63() % int64(blksize*2)
+ if trunclen > int64(blksize) {
+ err := os.Remove(fnm)
+ if err == nil {
+ nRemove++
+ }
+ } else if os.WriteFile(fnm+"#", make([]byte, trunclen), 0700) == nil {
+ err := os.Rename(fnm+"#", fnm)
+ if err == nil {
+ nTrunc++
+ }
+ }
+ }
+ }()
+ }
+
+ failed := false
+ var wg sync.WaitGroup
+ var slots = make(chan bool, 100) // limit concurrency / memory usage
+ for i := 0; i < 20000; i++ {
+ offset := (i * 123456) % blksize
+ slots <- true
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer func() { <-slots }()
+ buf := make([]byte, 654321)
+ if offset+len(buf) > blksize {
+ buf = buf[:blksize-offset]
+ }
+ n, err := cache.ReadAt(resp.Locator, buf, offset)
+ if failed {
+ // don't fill logs with subsequent errors
+ return
+ }
+ if !c.Check(err, check.IsNil, check.Commentf("offset=%d", offset)) {
+ failed = true
+ }
+ c.Assert(n, check.Equals, len(buf))
+ }()
+ }
+ wg.Wait()
+}
+
+func (s *keepCacheSuite) TestStreaming(c *check.C) {
+ blksize := 64000000
+ backend := &keepGatewayMemoryBacked{
+ pauseBlockReadUntil: make(chan error),
+ pauseBlockReadAfter: blksize / 8,
+ }
+ cache := DiskCache{
+ KeepGateway: backend,
+ MaxSize: ByteSizeOrPercent(blksize),
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resp, err := cache.BlockWrite(ctx, BlockWriteOptions{
+ Data: make([]byte, blksize),
+ })
+ c.Check(err, check.IsNil)
+ os.RemoveAll(filepath.Join(cache.Dir, resp.Locator[:3]))
+
+ // Start a lot of concurrent requests for various ranges of
+ // the same block. Our backend will return the first 8MB and
+ // then pause. The requests that can be satisfied by the first
+ // 8MB of data should return quickly. The rest should wait,
+ // and return after we release pauseBlockReadUntil.
+ var wgEarly, wgLate sync.WaitGroup
+ var doneEarly, doneLate int32
+ for i := 0; i < 10000; i++ {
+ wgEarly.Add(1)
+ go func() {
+ offset := int(rand.Int63() % int64(blksize-benchReadSize))
+ if offset+benchReadSize > backend.pauseBlockReadAfter {
+ wgLate.Add(1)
+ defer wgLate.Done()
+ wgEarly.Done()
+ defer atomic.AddInt32(&doneLate, 1)
+ } else {
+ defer wgEarly.Done()
+ defer atomic.AddInt32(&doneEarly, 1)
+ }
+ buf := make([]byte, benchReadSize)
+ n, err := cache.ReadAt(resp.Locator, buf, offset)
+ c.Check(n, check.Equals, len(buf))
+ c.Check(err, check.IsNil)
+ }()
+ }
+
+ // Ensure all early ranges finish while backend request(s) are
+ // paused.
+ wgEarly.Wait()
+ c.Logf("doneEarly = %d", doneEarly)
+ c.Check(doneLate, check.Equals, int32(0))
+
+ // Unpause backend request(s).
+ close(backend.pauseBlockReadUntil)
+ wgLate.Wait()
+ c.Logf("doneLate = %d", doneLate)
+}
+
+var _ = check.Suite(&keepCacheBenchSuite{})
+
+type keepCacheBenchSuite struct {
+ blksize int
+ blkcount int
+ backend *keepGatewayMemoryBacked
+ cache *DiskCache
+ locators []string
+}
+
+func (s *keepCacheBenchSuite) SetUpTest(c *check.C) {
+ s.blksize = 64000000
+ s.blkcount = 8
+ s.backend = &keepGatewayMemoryBacked{}
+ s.cache = &DiskCache{
+ KeepGateway: s.backend,
+ MaxSize: ByteSizeOrPercent(s.blksize),
+ Dir: c.MkDir(),
+ Logger: ctxlog.TestLogger(c),
+ }
+ s.locators = make([]string, s.blkcount)
+ data := make([]byte, s.blksize)
+ for b := 0; b < s.blkcount; b++ {
+ for i := range data {
+ data[i] = byte(b)
+ }
+ resp, err := s.cache.BlockWrite(context.Background(), BlockWriteOptions{
+ Data: data,
+ })
+ c.Assert(err, check.IsNil)
+ s.locators[b] = resp.Locator
+ }
+}
+
+func (s *keepCacheBenchSuite) BenchmarkConcurrentReads(c *check.C) {
+ var wg sync.WaitGroup
+ for i := 0; i < c.N; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ buf := make([]byte, benchReadSize)
+ _, err := s.cache.ReadAt(s.locators[i%s.blkcount], buf, int((int64(i)*1234)%int64(s.blksize-benchReadSize)))
+ if err != nil {
+ c.Fail()
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+func (s *keepCacheBenchSuite) BenchmarkSequentialReads(c *check.C) {
+ buf := make([]byte, benchReadSize)
+ for i := 0; i < c.N; i++ {
+ _, err := s.cache.ReadAt(s.locators[i%s.blkcount], buf, int((int64(i)*1234)%int64(s.blksize-benchReadSize)))
+ if err != nil {
+ c.Fail()
+ }
+ }
+}
+
+const benchReadSize = 1000
+
+var _ = check.Suite(&fileOpsSuite{})
+
+type fileOpsSuite struct{}
+
+// BenchmarkOpenClose and BenchmarkKeepOpen can be used to measure the
+// potential performance improvement of caching filehandles rather
+// than opening/closing the cache file for each read.
+//
+// Results from a development machine indicate a ~3x throughput
+// improvement: ~636 MB/s when opening/closing the file for each
+// 1000-byte read vs. ~2 GB/s when opening the file once and doing
+// concurrent reads using the same file descriptor.
+func (s *fileOpsSuite) BenchmarkOpenClose(c *check.C) {
+ fnm := c.MkDir() + "/testfile"
+ os.WriteFile(fnm, make([]byte, 64000000), 0700)
+ var wg sync.WaitGroup
+ for i := 0; i < c.N; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ f, err := os.OpenFile(fnm, os.O_CREATE|os.O_RDWR, 0700)
+ if err != nil {
+ c.Fail()
+ return
+ }
+ _, err = f.ReadAt(make([]byte, benchReadSize), (int64(i)*1000000)%63123123)
+ if err != nil {
+ c.Fail()
+ return
+ }
+ f.Close()
+ }()
+ }
+ wg.Wait()
+}
+
+func (s *fileOpsSuite) BenchmarkKeepOpen(c *check.C) {
+ fnm := c.MkDir() + "/testfile"
+ os.WriteFile(fnm, make([]byte, 64000000), 0700)
+ f, err := os.OpenFile(fnm, os.O_CREATE|os.O_RDWR, 0700)
+ if err != nil {
+ c.Fail()
+ return
+ }
+ var wg sync.WaitGroup
+ for i := 0; i < c.N; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, err = f.ReadAt(make([]byte, benchReadSize), (int64(i)*1000000)%63123123)
+ if err != nil {
+ c.Fail()
+ return
+ }
+ }()
+ }
+ wg.Wait()
+ f.Close()
+}
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
index eb7988422d..85750d8cfc 100644
--- a/sdk/go/arvados/keep_service.go
+++ b/sdk/go/arvados/keep_service.go
@@ -12,7 +12,10 @@ import (
"net/http"
"strconv"
"strings"
+ "sync/atomic"
"time"
+
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
)
// KeepService is an arvados#keepService record
@@ -30,7 +33,8 @@ type KeepService struct {
type KeepMount struct {
UUID string `json:"uuid"`
DeviceID string `json:"device_id"`
- ReadOnly bool `json:"read_only"`
+ AllowWrite bool `json:"allow_write"`
+ AllowTrash bool `json:"allow_trash"`
Replication int `json:"replication"`
StorageClasses map[string]bool `json:"storage_classes"`
}
@@ -142,16 +146,16 @@ func (s *KeepService) Untrash(ctx context.Context, c *Client, blk string) error
// IndexMount returns an unsorted list of blocks at the given mount point.
func (s *KeepService) IndexMount(ctx context.Context, c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {
- return s.index(ctx, c, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
+ return s.index(ctx, c, prefix, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
}
// Index returns an unsorted list of blocks that can be retrieved from
// this server.
func (s *KeepService) Index(ctx context.Context, c *Client, prefix string) ([]KeepServiceIndexEntry, error) {
- return s.index(ctx, c, s.url("index/"+prefix))
+ return s.index(ctx, c, prefix, s.url("index/"+prefix))
}
-func (s *KeepService) index(ctx context.Context, c *Client, url string) ([]KeepServiceIndexEntry, error) {
+func (s *KeepService) index(ctx context.Context, c *Client, prefix, url string) ([]KeepServiceIndexEntry, error) {
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
return nil, fmt.Errorf("NewRequestWithContext(%v): %v", url, err)
@@ -164,10 +168,30 @@ func (s *KeepService) index(ctx context.Context, c *Client, url string) ([]KeepS
}
defer resp.Body.Close()
+ var progress int64
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ go func() {
+ log := ctxlog.FromContext(ctx)
+ logticker := time.NewTicker(5 * time.Minute)
+ defer logticker.Stop()
+ for {
+ select {
+ case <-logticker.C:
+ log.Printf("index progress: received %d blocks from %s", atomic.LoadInt64(&progress), url)
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
var entries []KeepServiceIndexEntry
scanner := bufio.NewScanner(resp.Body)
sawEOF := false
for scanner.Scan() {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
if scanner.Err() != nil {
// If we encounter a read error (timeout,
// connection failure), stop now and return it
@@ -187,6 +211,9 @@ func (s *KeepService) index(ctx context.Context, c *Client, url string) ([]KeepS
if len(fields) != 2 {
return nil, fmt.Errorf("Malformed index line %q: %d fields", line, len(fields))
}
+ if !strings.HasPrefix(fields[0], prefix) {
+ return nil, fmt.Errorf("Index response included block %q despite asking for prefix %q", fields[0], prefix)
+ }
mtime, err := strconv.ParseInt(fields[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("Malformed index line %q: mtime: %v", line, err)
@@ -203,6 +230,7 @@ func (s *KeepService) index(ctx context.Context, c *Client, url string) ([]KeepS
SizedDigest: SizedDigest(fields[0]),
Mtime: mtime,
})
+ atomic.AddInt64(&progress, 1)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("Error scanning index response: %v", err)
diff --git a/sdk/go/arvados/limiter.go b/sdk/go/arvados/limiter.go
new file mode 100644
index 0000000000..dc944160ab
--- /dev/null
+++ b/sdk/go/arvados/limiter.go
@@ -0,0 +1,154 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+var (
+ requestLimiterQuietPeriod = time.Second
+ requestLimiterInitialLimit int64 = 8
+)
+
+type requestLimiter struct {
+ current int64
+ limit int64
+ maxlimit int64
+ lock sync.Mutex
+ cond *sync.Cond
+ quietUntil time.Time
+}
+
+// Acquire reserves one request slot, waiting if necessary.
+//
+// Acquire returns early if ctx cancels before a slot is available. It
+// is assumed in this case the caller will immediately notice
+// ctx.Err() != nil and call Release().
+func (rl *requestLimiter) Acquire(ctx context.Context) {
+ rl.lock.Lock()
+ if rl.cond == nil {
+ // First use of requestLimiter. Initialize.
+ rl.cond = sync.NewCond(&rl.lock)
+ rl.limit = requestLimiterInitialLimit
+ }
+ // Wait out the quiet period(s) immediately following a 503.
+ for ctx.Err() == nil {
+ delay := rl.quietUntil.Sub(time.Now())
+ if delay < 0 {
+ break
+ }
+ // Wait for the end of the quiet period, which started
+ // when we last received a 503 response.
+ rl.lock.Unlock()
+ timer := time.NewTimer(delay)
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ timer.Stop()
+ }
+ rl.lock.Lock()
+ }
+ ready := make(chan struct{})
+ go func() {
+ // close ready when a slot is available _or_ we wake
+ // up and find ctx has been canceled (meaning Acquire
+ // has already returned, or is about to).
+ for rl.limit > 0 && rl.limit <= rl.current && ctx.Err() == nil {
+ rl.cond.Wait()
+ }
+ close(ready)
+ }()
+ select {
+ case <-ready:
+ // Wait() returned, so we have the lock.
+ rl.current++
+ rl.lock.Unlock()
+ case <-ctx.Done():
+ // When Wait() returns the lock to our goroutine
+ // (which might have already happened) we need to
+ // release it (if we don't do this now, the following
+ // Lock() can deadlock).
+ go func() {
+ <-ready
+ rl.lock.Unlock()
+ }()
+ // Note we may have current > limit until the caller
+ // calls Release().
+ rl.lock.Lock()
+ rl.current++
+ rl.lock.Unlock()
+ }
+}
+
+// Release releases a slot that has been reserved with Acquire.
+func (rl *requestLimiter) Release() {
+ rl.lock.Lock()
+ rl.current--
+ rl.lock.Unlock()
+ rl.cond.Signal()
+}
+
+// Report uses the return values from (*http.Client)Do() to adjust the
+// outgoing request limit (increase on success, decrease on 503).
+//
+// Return value is true if the response was a 503.
+func (rl *requestLimiter) Report(resp *http.Response, err error) bool {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+ is503 := false
+ if err != nil {
+ uerr := &url.Error{}
+ if errors.As(err, &uerr) && uerr.Err.Error() == "Service Unavailable" {
+ // This is how http.Client reports 503 from proxy server
+ is503 = true
+ } else {
+ return false
+ }
+ } else {
+ is503 = resp.StatusCode == http.StatusServiceUnavailable
+ }
+ if is503 {
+ if rl.limit == 0 {
+ // Concurrency was unlimited until now.
+ // Calculate new limit based on actual
+ // concurrency instead of previous limit.
+ rl.limit = rl.current
+ }
+ if time.Now().After(rl.quietUntil) {
+ // Reduce concurrency limit by half.
+ rl.limit = (rl.limit + 1) / 2
+ // Don't start any new calls (or reduce the
+ // limit even further on additional 503s) for
+ // a second.
+ rl.quietUntil = time.Now().Add(requestLimiterQuietPeriod)
+ }
+ return true
+ }
+ if err == nil && resp.StatusCode >= 200 && resp.StatusCode < 400 && rl.limit > 0 {
+		// After each non-server-error response, increase
+		// concurrency limit by at least 10% -- but not beyond
+		// 2x the current number of concurrent calls, and not
+		// beyond maxlimit (if set).
+ increase := rl.limit / 10
+ if increase < 1 {
+ increase = 1
+ }
+ rl.limit += increase
+ if max := rl.current * 2; max < rl.limit {
+ rl.limit = max
+ }
+ if rl.maxlimit > 0 && rl.maxlimit < rl.limit {
+ rl.limit = rl.maxlimit
+ }
+ rl.cond.Broadcast()
+ }
+ return false
+}
diff --git a/sdk/go/arvados/limiter_test.go b/sdk/go/arvados/limiter_test.go
new file mode 100644
index 0000000000..1e73b1c28f
--- /dev/null
+++ b/sdk/go/arvados/limiter_test.go
@@ -0,0 +1,110 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&limiterSuite{})
+
+type limiterSuite struct{}
+
+func (*limiterSuite) TestInitialLimit(c *C) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+ rl := requestLimiter{}
+
+ var wg sync.WaitGroup
+ wg.Add(int(requestLimiterInitialLimit))
+ for i := int64(0); i < requestLimiterInitialLimit; i++ {
+ go func() {
+ rl.Acquire(ctx)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ c.Check(rl.current, Equals, requestLimiterInitialLimit)
+ wg.Add(int(requestLimiterInitialLimit))
+ for i := int64(0); i < requestLimiterInitialLimit; i++ {
+ go func() {
+ rl.Release()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ c.Check(rl.current, Equals, int64(0))
+}
+
+func (*limiterSuite) TestCancelWhileWaitingForAcquire(c *C) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+ rl := requestLimiter{}
+
+ rl.Acquire(ctx)
+ rl.limit = 1
+ ctxShort, cancel := context.WithDeadline(ctx, time.Now().Add(time.Millisecond))
+ defer cancel()
+ rl.Acquire(ctxShort)
+ c.Check(rl.current, Equals, int64(2))
+ c.Check(ctxShort.Err(), NotNil)
+ rl.Release()
+ rl.Release()
+ c.Check(rl.current, Equals, int64(0))
+}
+
+func (*limiterSuite) TestReducedLimitAndQuietPeriod(c *C) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+ rl := requestLimiter{}
+
+ // Use a short quiet period to make tests faster
+ defer func(orig time.Duration) { requestLimiterQuietPeriod = orig }(requestLimiterQuietPeriod)
+ requestLimiterQuietPeriod = time.Second / 10
+
+ for i := 0; i < 5; i++ {
+ rl.Acquire(ctx)
+ }
+ rl.Report(&http.Response{StatusCode: http.StatusServiceUnavailable}, nil)
+ c.Check(rl.limit, Equals, requestLimiterInitialLimit/2)
+ for i := 0; i < 5; i++ {
+ rl.Release()
+ }
+
+ // Even with all slots released, we can't Acquire in the quiet
+ // period.
+
+ // (a) If our context expires before the end of the quiet
+ // period, we get back DeadlineExceeded -- without waiting for
+ // the end of the quiet period.
+ acquire := time.Now()
+ ctxShort, cancel := context.WithDeadline(ctx, time.Now().Add(requestLimiterQuietPeriod/10))
+ defer cancel()
+ rl.Acquire(ctxShort)
+ c.Check(ctxShort.Err(), Equals, context.DeadlineExceeded)
+ c.Check(time.Since(acquire) < requestLimiterQuietPeriod/2, Equals, true)
+ c.Check(rl.quietUntil.Sub(time.Now()) > requestLimiterQuietPeriod/2, Equals, true)
+ rl.Release()
+
+ // (b) If our context does not expire first, Acquire waits for
+ // the end of the quiet period.
+ ctxLong, cancel := context.WithDeadline(ctx, time.Now().Add(requestLimiterQuietPeriod*2))
+ defer cancel()
+ acquire = time.Now()
+ rl.Acquire(ctxLong)
+ c.Check(time.Since(acquire) > requestLimiterQuietPeriod/10, Equals, true)
+ c.Check(time.Since(acquire) < requestLimiterQuietPeriod, Equals, true)
+ c.Check(ctxLong.Err(), IsNil)
+ rl.Release()
+
+ // OK to call Report() with nil Response and non-nil error.
+ rl.Report(nil, errors.New("network error"))
+}
diff --git a/sdk/go/arvados/log.go b/sdk/go/arvados/log.go
index 6f72634e54..b5860d0593 100644
--- a/sdk/go/arvados/log.go
+++ b/sdk/go/arvados/log.go
@@ -10,14 +10,17 @@ import (
// Log is an arvados#log record
type Log struct {
- ID uint64 `json:"id"`
+ ID int64 `json:"id"`
UUID string `json:"uuid"`
+ OwnerUUID string `json:"owner_uuid"`
ObjectUUID string `json:"object_uuid"`
ObjectOwnerUUID string `json:"object_owner_uuid"`
EventType string `json:"event_type"`
- EventAt *time.Time `json:"event"`
+ EventAt time.Time `json:"event"`
+ Summary string `json:"summary"`
Properties map[string]interface{} `json:"properties"`
- CreatedAt *time.Time `json:"created_at"`
+ CreatedAt time.Time `json:"created_at"`
+ ModifiedAt time.Time `json:"modified_at"`
}
// LogList is an arvados#logList resource.
diff --git a/sdk/go/arvados/tls_certs.go b/sdk/go/arvados/tls_certs.go
new file mode 100644
index 0000000000..db52781339
--- /dev/null
+++ b/sdk/go/arvados/tls_certs.go
@@ -0,0 +1,23 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "os"
+
+// Load root CAs from /etc/arvados/ca-certificates.crt if it exists
+// and SSL_CERT_FILE does not already specify a different file.
+func init() {
+ envvar := "SSL_CERT_FILE"
+ certfile := "/etc/arvados/ca-certificates.crt"
+ if os.Getenv(envvar) != "" {
+ // Caller has already specified SSL_CERT_FILE.
+ return
+ }
+ if _, err := os.ReadFile(certfile); err != nil {
+ // Custom cert file is not present/readable.
+ return
+ }
+ os.Setenv(envvar, certfile)
+}
diff --git a/sdk/go/arvados/tls_certs_test.go b/sdk/go/arvados/tls_certs_test.go
new file mode 100644
index 0000000000..7900867715
--- /dev/null
+++ b/sdk/go/arvados/tls_certs_test.go
@@ -0,0 +1,32 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+ "os"
+ "os/exec"
+
+ check "gopkg.in/check.v1"
+)
+
+type tlsCertsSuite struct{}
+
+var _ = check.Suite(&tlsCertsSuite{})
+
+func (s *tlsCertsSuite) TestCustomCert(c *check.C) {
+ certfile := "/etc/arvados/ca-certificates.crt"
+ if _, err := os.Stat(certfile); err != nil {
+ c.Skip("custom cert file " + certfile + " does not exist")
+ }
+ out, err := exec.Command("bash", "-c", "SSL_CERT_FILE= go run tls_certs_test_showenv.go").CombinedOutput()
+ c.Logf("%s", out)
+ c.Assert(err, check.IsNil)
+ c.Check(string(out), check.Equals, certfile+"\n")
+
+ out, err = exec.Command("bash", "-c", "SSL_CERT_FILE=/dev/null go run tls_certs_test_showenv.go").CombinedOutput()
+ c.Logf("%s", out)
+ c.Assert(err, check.IsNil)
+ c.Check(string(out), check.Equals, "/dev/null\n")
+}
diff --git a/sdk/go/arvados/tls_certs_test_showenv.go b/sdk/go/arvados/tls_certs_test_showenv.go
new file mode 100644
index 0000000000..f2622cf11d
--- /dev/null
+++ b/sdk/go/arvados/tls_certs_test_showenv.go
@@ -0,0 +1,22 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build ignore
+
+// This is a test program invoked by tls_certs_test.go
+
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+var _ = arvados.Client{}
+
+func main() {
+ fmt.Println(os.Getenv("SSL_CERT_FILE"))
+}
diff --git a/sdk/go/arvados/vocabulary.go b/sdk/go/arvados/vocabulary.go
index bb1bec789f..1df43b5fb8 100644
--- a/sdk/go/arvados/vocabulary.go
+++ b/sdk/go/arvados/vocabulary.go
@@ -26,17 +26,28 @@ type VocabularyTag struct {
Values map[string]VocabularyTagValue `json:"values"`
}
-// Cannot have a constant map in Go, so we have to use a function
+// Cannot have a constant map in Go, so we have to use a function.
+// If you are adding a new system property, it SHOULD start with `arv:`,
+// and Check will allow it. This map is for historical exceptions that
+// predate standardizing on this prefix.
func (v *Vocabulary) systemTagKeys() map[string]bool {
return map[string]bool{
- "type": true,
- "template_uuid": true,
- "groups": true,
- "username": true,
- "image_timestamp": true,
+ // Collection keys - set by arvados-cwl-runner
+ "container_request": true,
+ "container_uuid": true,
+ "type": true,
+ // Collection keys - set by arv-keepdocker (on the way out)
"docker-image-repo-tag": true,
- "filters": true,
- "container_request": true,
+ // Container request keys - set by arvados-cwl-runner
+ "cwl_input": true,
+ "cwl_output": true,
+ "template_uuid": true,
+ // Group keys
+ "filters": true,
+ // Link keys
+ "groups": true,
+ "image_timestamp": true,
+ "username": true,
}
}
@@ -259,7 +270,7 @@ func (v *Vocabulary) Check(data map[string]interface{}) error {
}
for key, val := range data {
// Checks for key validity
- if v.reservedTagKeys[key] {
+ if strings.HasPrefix(key, "arv:") || v.reservedTagKeys[key] {
// Allow reserved keys to be used even if they are not defined in
// the vocabulary no matter its strictness.
continue
diff --git a/sdk/go/arvados/vocabulary_test.go b/sdk/go/arvados/vocabulary_test.go
index 84b9bf2295..af62833a31 100644
--- a/sdk/go/arvados/vocabulary_test.go
+++ b/sdk/go/arvados/vocabulary_test.go
@@ -230,14 +230,17 @@ func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
true, "",
&Vocabulary{
reservedTagKeys: map[string]bool{
- "type": true,
- "template_uuid": true,
- "groups": true,
- "username": true,
- "image_timestamp": true,
+ "container_request": true,
+ "container_uuid": true,
+ "cwl_input": true,
+ "cwl_output": true,
"docker-image-repo-tag": true,
"filters": true,
- "container_request": true,
+ "groups": true,
+ "image_timestamp": true,
+ "template_uuid": true,
+ "type": true,
+ "username": true,
},
StrictTags: false,
Tags: map[string]VocabularyTag{
@@ -298,6 +301,37 @@ func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
}
}
+func (s *VocabularySuite) TestValidSystemProperties(c *check.C) {
+ s.testVoc.StrictTags = true
+ properties := map[string]interface{}{
+ "arv:gitBranch": "main",
+ "arv:OK": true,
+ "arv:cost": 123,
+ }
+ c.Check(s.testVoc.Check(properties), check.IsNil)
+}
+
+func (s *VocabularySuite) TestSystemPropertiesPrefixTypo(c *check.C) {
+ s.testVoc.StrictTags = true
+ for _, key := range []string{
+ // Extra characters in prefix
+ "arv :foo",
+ " arv:foo",
+ // Wrong punctuation
+ "arv.foo",
+ "arv-foo",
+ "arv_foo",
+ // Wrong case
+ "Arv:foo",
+ // Wrong word
+ "arvados",
+ "arvados:foo",
+ } {
+ properties := map[string]interface{}{key: "value"}
+ c.Check(s.testVoc.Check(properties), check.NotNil)
+ }
+}
+
func (s *VocabularySuite) TestValidationErrors(c *check.C) {
tests := []struct {
name string
diff --git a/sdk/go/arvadosclient/arvadosclient.go b/sdk/go/arvadosclient/arvadosclient.go
index 24070c5b06..d0ebdc1b01 100644
--- a/sdk/go/arvadosclient/arvadosclient.go
+++ b/sdk/go/arvadosclient/arvadosclient.go
@@ -9,16 +9,12 @@ package arvadosclient
import (
"bytes"
"crypto/tls"
- "crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
- "io/ioutil"
- "log"
"net/http"
"net/url"
- "os"
"strings"
"sync"
"time"
@@ -103,16 +99,17 @@ type ArvadosClient struct {
// Client object shared by client requests. Supports HTTP KeepAlive.
Client *http.Client
- // If true, sets the X-External-Client header to indicate
- // the client is outside the cluster.
- External bool
-
// Base URIs of Keep services, e.g., {"https://host1:8443",
// "https://host2:8443"}. If this is nil, Keep clients will
// use the arvados.v1.keep_services.accessible API to discover
// available services.
KeepServiceURIs []string
+ // Maximum disk cache size in bytes or percent of total
+ // filesystem size. If zero, use default, currently 10% of
+ // filesystem size.
+ DiskCacheSize arvados.ByteSizeOrPercent
+
// Discovery document
DiscoveryDoc Dict
@@ -125,40 +122,10 @@ type ArvadosClient struct {
RequestID string
}
-var CertFiles = []string{
- "/etc/arvados/ca-certificates.crt",
- "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
- "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
-}
-
// MakeTLSConfig sets up TLS configuration for communicating with
// Arvados and Keep services.
func MakeTLSConfig(insecure bool) *tls.Config {
- tlsconfig := tls.Config{InsecureSkipVerify: insecure}
-
- if !insecure {
- // Use the first entry in CertFiles that we can read
- // certificates from. If none of those work out, use
- // the Go defaults.
- certs := x509.NewCertPool()
- for _, file := range CertFiles {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- if !os.IsNotExist(err) {
- log.Printf("proceeding without loading cert file %q: %s", file, err)
- }
- continue
- }
- if !certs.AppendCertsFromPEM(data) {
- log.Printf("unable to load any certificates from %v", file)
- continue
- }
- tlsconfig.RootCAs = certs
- break
- }
- }
-
- return &tlsconfig
+ return &tls.Config{InsecureSkipVerify: insecure}
}
// New returns an ArvadosClient using the given arvados.Client
@@ -166,19 +133,23 @@ func MakeTLSConfig(insecure bool) *tls.Config {
// fields from configuration files but still need to use the
// arvadosclient.ArvadosClient package.
func New(c *arvados.Client) (*ArvadosClient, error) {
- ac := &ArvadosClient{
- Scheme: "https",
- ApiServer: c.APIHost,
- ApiToken: c.AuthToken,
- ApiInsecure: c.Insecure,
- Client: &http.Client{
+ hc := c.Client
+ if hc == nil {
+ hc = &http.Client{
Timeout: 5 * time.Minute,
Transport: &http.Transport{
TLSClientConfig: MakeTLSConfig(c.Insecure)},
- },
- External: false,
+ }
+ }
+ ac := &ArvadosClient{
+ Scheme: "https",
+ ApiServer: c.APIHost,
+ ApiToken: c.AuthToken,
+ ApiInsecure: c.Insecure,
+ Client: hc,
Retries: 2,
KeepServiceURIs: c.KeepServiceURIs,
+ DiskCacheSize: c.DiskCacheSize,
lastClosedIdlesAt: time.Now(),
}
@@ -187,15 +158,9 @@ func New(c *arvados.Client) (*ArvadosClient, error) {
// MakeArvadosClient creates a new ArvadosClient using the standard
// environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,
-// ARVADOS_API_HOST_INSECURE, ARVADOS_EXTERNAL_CLIENT, and
-// ARVADOS_KEEP_SERVICES.
-func MakeArvadosClient() (ac *ArvadosClient, err error) {
- ac, err = New(arvados.NewClientFromEnv())
- if err != nil {
- return
- }
- ac.External = StringBool(os.Getenv("ARVADOS_EXTERNAL_CLIENT"))
- return
+// ARVADOS_API_HOST_INSECURE, and ARVADOS_KEEP_SERVICES.
+func MakeArvadosClient() (*ArvadosClient, error) {
+ return New(arvados.NewClientFromEnv())
}
// CallRaw is the same as Call() but returns a Reader that reads the
@@ -238,77 +203,37 @@ func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string,
vals.Set(k, string(m))
}
}
-
- retryable := false
- switch method {
- case "GET", "HEAD", "PUT", "OPTIONS", "DELETE":
- retryable = true
- }
-
- // Non-retryable methods such as POST are not safe to retry automatically,
- // so we minimize such failures by always using a new or recently active socket
- if !retryable {
- if time.Since(c.lastClosedIdlesAt) > MaxIdleConnectionDuration {
- c.lastClosedIdlesAt = time.Now()
- c.Client.Transport.(*http.Transport).CloseIdleConnections()
- }
- }
-
- // Make the request
var req *http.Request
- var resp *http.Response
-
- for attempt := 0; attempt <= c.Retries; attempt++ {
- if method == "GET" || method == "HEAD" {
- u.RawQuery = vals.Encode()
- if req, err = http.NewRequest(method, u.String(), nil); err != nil {
- return nil, err
- }
- } else {
- if req, err = http.NewRequest(method, u.String(), bytes.NewBufferString(vals.Encode())); err != nil {
- return nil, err
- }
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
- }
-
- // Add api token header
- req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", c.ApiToken))
- if c.RequestID != "" {
- req.Header.Add("X-Request-Id", c.RequestID)
- }
- if c.External {
- req.Header.Add("X-External-Client", "1")
- }
-
- resp, err = c.Client.Do(req)
- if err != nil {
- if retryable {
- time.Sleep(RetryDelay)
- continue
- } else {
- return nil, err
- }
- }
-
- if resp.StatusCode == http.StatusOK {
- return resp.Body, nil
+ if method == "GET" || method == "HEAD" {
+ u.RawQuery = vals.Encode()
+ if req, err = http.NewRequest(method, u.String(), nil); err != nil {
+ return nil, err
}
-
- defer resp.Body.Close()
-
- switch resp.StatusCode {
- case 408, 409, 422, 423, 500, 502, 503, 504:
- time.Sleep(RetryDelay)
- continue
- default:
- return nil, newAPIServerError(c.ApiServer, resp)
+ } else {
+ if req, err = http.NewRequest(method, u.String(), bytes.NewBufferString(vals.Encode())); err != nil {
+ return nil, err
}
+ req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
}
-
- if resp != nil {
+ if c.RequestID != "" {
+ req.Header.Add("X-Request-Id", c.RequestID)
+ }
+ client := arvados.Client{
+ Client: c.Client,
+ APIHost: c.ApiServer,
+ AuthToken: c.ApiToken,
+ Insecure: c.ApiInsecure,
+ Timeout: 30 * RetryDelay * time.Duration(c.Retries),
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ defer resp.Body.Close()
return nil, newAPIServerError(c.ApiServer, resp)
}
- return nil, err
+ return resp.Body, nil
}
func newAPIServerError(ServerAddress string, resp *http.Response) APIServerError {
@@ -342,12 +267,12 @@ func newAPIServerError(ServerAddress string, resp *http.Response) APIServerError
// Call an API endpoint and parse the JSON response into an object.
//
-// method - HTTP method: GET, HEAD, PUT, POST, PATCH or DELETE.
-// resourceType - the type of arvados resource to act on (e.g., "collections", "pipeline_instances").
-// uuid - the uuid of the specific item to access. May be empty.
-// action - API method name (e.g., "lock"). This is often empty if implied by method and uuid.
-// parameters - method parameters.
-// output - a map or annotated struct which is a legal target for encoding/json/Decoder.
+// method - HTTP method: GET, HEAD, PUT, POST, PATCH or DELETE.
+// resourceType - the type of arvados resource to act on (e.g., "collections", "pipeline_instances").
+// uuid - the uuid of the specific item to access. May be empty.
+// action - API method name (e.g., "lock"). This is often empty if implied by method and uuid.
+// parameters - method parameters.
+// output - a map or annotated struct which is a legal target for encoding/json/Decoder.
//
// Returns a non-nil error if an error occurs making the API call, the
// API responds with a non-successful HTTP status, or an error occurs
diff --git a/sdk/go/arvadosclient/arvadosclient_test.go b/sdk/go/arvadosclient/arvadosclient_test.go
index 27e23c1aea..b074e21e81 100644
--- a/sdk/go/arvadosclient/arvadosclient_test.go
+++ b/sdk/go/arvadosclient/arvadosclient_test.go
@@ -31,7 +31,7 @@ type ServerRequiredSuite struct{}
func (s *ServerRequiredSuite) SetUpSuite(c *C) {
arvadostest.StartKeep(2, false)
- RetryDelay = 0
+ RetryDelay = 2 * time.Second
}
func (s *ServerRequiredSuite) TearDownSuite(c *C) {
@@ -248,7 +248,7 @@ func (s *UnitSuite) TestPDHMatch(c *C) {
type MockArvadosServerSuite struct{}
func (s *MockArvadosServerSuite) SetUpSuite(c *C) {
- RetryDelay = 0
+ RetryDelay = 100 * time.Millisecond
}
func (s *MockArvadosServerSuite) SetUpTest(c *C) {
@@ -279,15 +279,17 @@ type APIStub struct {
}
func (h *APIStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
- if req.URL.Path == "/redirect-loop" {
- http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
- return
- }
- if h.respStatus[h.retryAttempts] < 0 {
- // Fail the client's Do() by starting a redirect loop
- http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
+ if status := h.respStatus[h.retryAttempts]; status < 0 {
+ // Fail the client's Do() by hanging up without
+ // sending an HTTP response header.
+ conn, _, err := resp.(http.Hijacker).Hijack()
+ if err != nil {
+ panic(err)
+ }
+ conn.Write([]byte("zzzzzzzzzz"))
+ conn.Close()
} else {
- resp.WriteHeader(h.respStatus[h.retryAttempts])
+ resp.WriteHeader(status)
resp.Write([]byte(h.responseBody[h.retryAttempts]))
}
h.retryAttempts++
@@ -302,22 +304,22 @@ func (s *MockArvadosServerSuite) TestWithRetries(c *C) {
"create", 0, 200, []int{200, 500}, []string{`{"ok":"ok"}`, ``},
},
{
- "get", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "get", 0, 423, []int{500, 500, 423, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
- "create", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "create", 0, 423, []int{500, 500, 423, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
- "update", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "update", 0, 422, []int{500, 500, 422, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
- "delete", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "delete", 0, 422, []int{500, 500, 422, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
- "get", 0, 502, []int{500, 500, 502, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "get", 0, 401, []int{500, 502, 401, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
- "create", 0, 502, []int{500, 500, 502, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+ "create", 0, 422, []int{500, 502, 422, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
},
{
"get", 0, 200, []int{500, 500, 200}, []string{``, ``, `{"ok":"ok"}`},
@@ -337,6 +339,12 @@ func (s *MockArvadosServerSuite) TestWithRetries(c *C) {
{
"create", 0, 401, []int{401, 200}, []string{``, `{"ok":"ok"}`},
},
+ {
+ "create", 0, 403, []int{403, 200}, []string{``, `{"ok":"ok"}`},
+ },
+ {
+ "create", 0, 422, []int{422, 200}, []string{``, `{"ok":"ok"}`},
+ },
{
"get", 0, 404, []int{404, 200}, []string{``, `{"ok":"ok"}`},
},
@@ -352,11 +360,13 @@ func (s *MockArvadosServerSuite) TestWithRetries(c *C) {
{
"get", 0, 200, []int{-1, -1, 200}, []string{``, ``, `{"ok":"ok"}`},
},
- // "POST" is not safe to retry: fail after one error
+ // "POST" protocol error is safe to retry
{
- "create", 0, -1, []int{-1, 200}, []string{``, `{"ok":"ok"}`},
+ "create", 0, 200, []int{-1, 200}, []string{``, `{"ok":"ok"}`},
},
} {
+ c.Logf("stub: %#v", stub)
+
api, err := RunFakeArvadosServer(&stub)
c.Check(err, IsNil)
@@ -396,7 +406,9 @@ func (s *MockArvadosServerSuite) TestWithRetries(c *C) {
default:
c.Check(err, NotNil)
c.Check(err, ErrorMatches, fmt.Sprintf("arvados API server error: %d.*", stub.expected))
- c.Check(err.(APIServerError).HttpStatusCode, Equals, stub.expected)
+ if c.Check(err, FitsTypeOf, APIServerError{}) {
+ c.Check(err.(APIServerError).HttpStatusCode, Equals, stub.expected)
+ }
}
}
}
diff --git a/sdk/go/arvadostest/api.go b/sdk/go/arvadostest/api.go
index d6da579d6b..e1827b5d1f 100644
--- a/sdk/go/arvadostest/api.go
+++ b/sdk/go/arvadostest/api.go
@@ -8,12 +8,15 @@ import (
"context"
"encoding/json"
"errors"
+ "io"
+ "net/http"
"net/url"
"reflect"
"runtime"
"sync"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
)
var ErrStubUnimplemented = errors.New("stub unimplemented")
@@ -37,6 +40,10 @@ func (as *APIStub) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error
as.appendCall(ctx, as.VocabularyGet, nil)
return arvados.Vocabulary{}, as.Error
}
+func (as *APIStub) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {
+ as.appendCall(ctx, as.DiscoveryDocument, nil)
+ return arvados.DiscoveryDocument{}, as.Error
+}
func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
as.appendCall(ctx, as.Login, options)
return arvados.LoginResponse{}, as.Error
@@ -45,6 +52,26 @@ func (as *APIStub) Logout(ctx context.Context, options arvados.LogoutOptions) (a
as.appendCall(ctx, as.Logout, options)
return arvados.LogoutResponse{}, as.Error
}
+func (as *APIStub) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {
+ as.appendCall(ctx, as.AuthorizedKeyCreate, options)
+ return arvados.AuthorizedKey{}, as.Error
+}
+func (as *APIStub) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {
+ as.appendCall(ctx, as.AuthorizedKeyUpdate, options)
+ return arvados.AuthorizedKey{}, as.Error
+}
+func (as *APIStub) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {
+ as.appendCall(ctx, as.AuthorizedKeyGet, options)
+ return arvados.AuthorizedKey{}, as.Error
+}
+func (as *APIStub) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {
+ as.appendCall(ctx, as.AuthorizedKeyList, options)
+ return arvados.AuthorizedKeyList{}, as.Error
+}
+func (as *APIStub) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {
+ as.appendCall(ctx, as.AuthorizedKeyDelete, options)
+ return arvados.AuthorizedKey{}, as.Error
+}
func (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
as.appendCall(ctx, as.CollectionCreate, options)
return arvados.Collection{}, as.Error
@@ -89,6 +116,10 @@ func (as *APIStub) ContainerUpdate(ctx context.Context, options arvados.UpdateOp
as.appendCall(ctx, as.ContainerUpdate, options)
return arvados.Container{}, as.Error
}
+func (as *APIStub) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
+ as.appendCall(ctx, as.ContainerPriorityUpdate, options)
+ return arvados.Container{}, as.Error
+}
func (as *APIStub) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
as.appendCall(ctx, as.ContainerGet, options)
return arvados.Container{}, as.Error
@@ -137,6 +168,26 @@ func (as *APIStub) ContainerRequestDelete(ctx context.Context, options arvados.D
as.appendCall(ctx, as.ContainerRequestDelete, options)
return arvados.ContainerRequest{}, as.Error
}
+func (as *APIStub) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {
+ as.appendCall(ctx, as.ContainerRequestContainerStatus, options)
+ return arvados.ContainerStatus{}, as.Error
+}
+func (as *APIStub) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (http.Handler, error) {
+ as.appendCall(ctx, as.ContainerRequestLog, options)
+ // Return a handler that responds with the configured
+ // error/success status.
+ return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ if as.Error == nil {
+ w.WriteHeader(http.StatusOK)
+ } else if err := httpserver.HTTPStatusError(nil); errors.As(as.Error, &err) {
+ w.WriteHeader(err.HTTPStatus())
+ io.WriteString(w, err.Error())
+ } else {
+ w.WriteHeader(http.StatusInternalServerError)
+ io.WriteString(w, err.Error())
+ }
+ }), nil
+}
func (as *APIStub) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {
as.appendCall(ctx, as.GroupCreate, options)
return arvados.Group{}, as.Error
@@ -193,6 +244,26 @@ func (as *APIStub) LinkDelete(ctx context.Context, options arvados.DeleteOptions
as.appendCall(ctx, as.LinkDelete, options)
return arvados.Link{}, as.Error
}
+func (as *APIStub) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {
+ as.appendCall(ctx, as.LogCreate, options)
+ return arvados.Log{}, as.Error
+}
+func (as *APIStub) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {
+ as.appendCall(ctx, as.LogUpdate, options)
+ return arvados.Log{}, as.Error
+}
+func (as *APIStub) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {
+ as.appendCall(ctx, as.LogGet, options)
+ return arvados.Log{}, as.Error
+}
+func (as *APIStub) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {
+ as.appendCall(ctx, as.LogList, options)
+ return arvados.LogList{}, as.Error
+}
+func (as *APIStub) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {
+ as.appendCall(ctx, as.LogDelete, options)
+ return arvados.Log{}, as.Error
+}
func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
as.appendCall(ctx, as.SpecimenCreate, options)
return arvados.Specimen{}, as.Error
@@ -293,6 +364,26 @@ func (as *APIStub) APIClientAuthorizationGet(ctx context.Context, options arvado
as.appendCall(ctx, as.APIClientAuthorizationGet, options)
return arvados.APIClientAuthorization{}, as.Error
}
+func (as *APIStub) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ as.appendCall(context.TODO(), as.ReadAt, struct {
+ locator string
+ dst []byte
+ offset int
+ }{locator, dst, offset})
+ return 0, as.Error
+}
+func (as *APIStub) BlockRead(ctx context.Context, options arvados.BlockReadOptions) (int, error) {
+ as.appendCall(ctx, as.BlockRead, options)
+ return 0, as.Error
+}
+func (as *APIStub) BlockWrite(ctx context.Context, options arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+ as.appendCall(ctx, as.BlockWrite, options)
+ return arvados.BlockWriteResponse{}, as.Error
+}
+func (as *APIStub) LocalLocator(locator string) (int, error) {
+ as.appendCall(context.TODO(), as.LocalLocator, locator)
+ return 0, as.Error
+}
func (as *APIStub) appendCall(ctx context.Context, method interface{}, options interface{}) {
as.mtx.Lock()
diff --git a/sdk/go/arvadostest/db.go b/sdk/go/arvadostest/db.go
index c20f61db26..d39f3c6fcb 100644
--- a/sdk/go/arvadostest/db.go
+++ b/sdk/go/arvadostest/db.go
@@ -5,11 +5,9 @@
package arvadostest
import (
- "context"
-
- "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/jmoiron/sqlx"
+
// sqlx needs lib/pq to talk to PostgreSQL
_ "github.com/lib/pq"
"gopkg.in/check.v1"
@@ -21,14 +19,3 @@ func DB(c *check.C, cluster *arvados.Cluster) *sqlx.DB {
c.Assert(err, check.IsNil)
return db
}
-
-// TransactionContext returns a context suitable for running a test
-// case in a new transaction, and a rollback func which the caller
-// should call after the test.
-func TransactionContext(c *check.C, db *sqlx.DB) (ctx context.Context, rollback func()) {
- tx, err := db.Beginx()
- c.Assert(err, check.IsNil)
- return ctrlctx.NewWithTransaction(context.Background(), tx), func() {
- c.Check(tx.Rollback(), check.IsNil)
- }
-}
diff --git a/sdk/go/arvadostest/fixtures.go b/sdk/go/arvadostest/fixtures.go
index ec55725412..3b8a618fea 100644
--- a/sdk/go/arvadostest/fixtures.go
+++ b/sdk/go/arvadostest/fixtures.go
@@ -32,12 +32,14 @@ const (
HelloWorldPdh = "55713e6a34081eb03609e7ad5fcad129+62"
MultilevelCollection1 = "zzzzz-4zz18-pyw8yp9g3pr7irn"
+ MultilevelCollection1PDH = "f9ddda46bb293b6847da984e3aa735db+290"
StorageClassesDesiredDefaultConfirmedDefault = "zzzzz-4zz18-3t236wr12769tga"
StorageClassesDesiredArchiveConfirmedDefault = "zzzzz-4zz18-3t236wr12769qqa"
EmptyCollectionUUID = "zzzzz-4zz18-gs9ooj1h9sd5mde"
- AProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso"
- ASubprojectUUID = "zzzzz-j7d0g-axqo7eu9pwvna1x"
+ AProjectUUID = "zzzzz-j7d0g-v955i6s2oi1cbso"
+ ASubprojectUUID = "zzzzz-j7d0g-axqo7eu9pwvna1x"
+ AFilterGroupUUID = "zzzzz-j7d0g-thisfiltergroup"
FooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
FooAndBarFilesInDirPDH = "870369fc72738603c2fad16664e50e2d+58"
@@ -83,8 +85,11 @@ const (
Repository2UUID = "zzzzz-s0uqq-382brsig8rp3667"
Repository2Name = "active/foo2"
- FooCollectionSharingTokenUUID = "zzzzz-gj3su-gf02tdm4g1z3e3u"
- FooCollectionSharingToken = "iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss"
+ FooFileCollectionUUID = "zzzzz-4zz18-znfnqtbbv4spc3w"
+ FooFileCollectionSharingTokenUUID = "zzzzz-gj3su-gf02tdm4g1z3e3u"
+ FooFileCollectionSharingToken = "iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss"
+ BarFileCollectionUUID = "zzzzz-4zz18-ehbhgtheo8909or"
+ BarFileCollectionPDH = "fa7aeb5140e2848d39b416daeef4ffc5+45"
WorkflowWithDefinitionYAMLUUID = "zzzzz-7fd4e-validworkfloyml"
diff --git a/sdk/go/arvadostest/keep_stub.go b/sdk/go/arvadostest/keep_stub.go
new file mode 100644
index 0000000000..ddfa3909bb
--- /dev/null
+++ b/sdk/go/arvadostest/keep_stub.go
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+type KeepStub struct{}
diff --git a/sdk/go/arvadostest/metrics.go b/sdk/go/arvadostest/metrics.go
new file mode 100644
index 0000000000..5fe1d607bf
--- /dev/null
+++ b/sdk/go/arvadostest/metrics.go
@@ -0,0 +1,22 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+ "bytes"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
+)
+
+func GatherMetricsAsString(reg *prometheus.Registry) string {
+ buf := bytes.NewBuffer(nil)
+ enc := expfmt.NewEncoder(buf, expfmt.FmtText)
+ got, _ := reg.Gather()
+ for _, mf := range got {
+ enc.Encode(mf)
+ }
+ return buf.String()
+}
diff --git a/sdk/go/arvadostest/oidc_provider.go b/sdk/go/arvadostest/oidc_provider.go
index 087adc4b24..31a2667122 100644
--- a/sdk/go/arvadostest/oidc_provider.go
+++ b/sdk/go/arvadostest/oidc_provider.go
@@ -9,6 +9,7 @@ import (
"crypto/rsa"
"encoding/base64"
"encoding/json"
+ "fmt"
"net/http"
"net/http/httptest"
"net/url"
@@ -32,14 +33,19 @@ type OIDCProvider struct {
AuthGivenName string
AuthFamilyName string
AccessTokenPayload map[string]interface{}
+ // end_session_endpoint metadata URL.
+ // If nil or empty, not included in discovery.
+ // If relative, built from Issuer.URL.
+ EndSessionEndpoint *url.URL
PeopleAPIResponse map[string]interface{}
// send incoming /userinfo requests to HoldUserInfo (if not
// nil), then receive from ReleaseUserInfo (if not nil),
// before responding (these are used to set up races)
- HoldUserInfo chan *http.Request
- ReleaseUserInfo chan struct{}
+ HoldUserInfo chan *http.Request
+ ReleaseUserInfo chan struct{}
+ UserInfoErrorStatus int // if non-zero, return this http status (probably 5xx)
key *rsa.PrivateKey
Issuer *httptest.Server
@@ -69,13 +75,26 @@ func (p *OIDCProvider) serveOIDC(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
switch req.URL.Path {
case "/.well-known/openid-configuration":
- json.NewEncoder(w).Encode(map[string]interface{}{
+ configuration := map[string]interface{}{
"issuer": p.Issuer.URL,
"authorization_endpoint": p.Issuer.URL + "/auth",
"token_endpoint": p.Issuer.URL + "/token",
"jwks_uri": p.Issuer.URL + "/jwks",
"userinfo_endpoint": p.Issuer.URL + "/userinfo",
- })
+ }
+ if p.EndSessionEndpoint == nil {
+ // Not included in configuration
+ } else if p.EndSessionEndpoint.Scheme != "" {
+ configuration["end_session_endpoint"] = p.EndSessionEndpoint.String()
+ } else {
+ u, err := url.Parse(p.Issuer.URL)
+ p.c.Check(err, check.IsNil,
+ check.Commentf("error parsing IssuerURL for EndSessionEndpoint"))
+ u.Scheme = "https"
+ u.Path = u.Path + p.EndSessionEndpoint.Path
+ configuration["end_session_endpoint"] = u.String()
+ }
+ json.NewEncoder(w).Encode(configuration)
case "/token":
var clientID, clientSecret string
auth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get("Authorization"), "Basic "))
@@ -138,6 +157,11 @@ func (p *OIDCProvider) serveOIDC(w http.ResponseWriter, req *http.Request) {
if p.ReleaseUserInfo != nil {
<-p.ReleaseUserInfo
}
+ if p.UserInfoErrorStatus > 0 {
+ w.WriteHeader(p.UserInfoErrorStatus)
+ fmt.Fprintf(w, "%T error body", p)
+ return
+ }
authhdr := req.Header.Get("Authorization")
if _, err := jwt.ParseSigned(strings.TrimPrefix(authhdr, "Bearer ")); err != nil {
p.c.Logf("OIDCProvider: bad auth %q", authhdr)
diff --git a/sdk/go/arvadostest/proxy.go b/sdk/go/arvadostest/proxy.go
index 48700d8b18..85d433089a 100644
--- a/sdk/go/arvadostest/proxy.go
+++ b/sdk/go/arvadostest/proxy.go
@@ -11,6 +11,7 @@ import (
"net/http/httptest"
"net/http/httputil"
"net/url"
+ "sync"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -26,6 +27,12 @@ type Proxy struct {
// A dump of each request that has been proxied.
RequestDumps [][]byte
+
+ // If non-nil, func will be called on each incoming request
+ // before proxying it.
+ Director func(*http.Request)
+
+ wg sync.WaitGroup
}
// NewProxy returns a new Proxy that saves a dump of each reqeust
@@ -62,11 +69,25 @@ func NewProxy(c *check.C, svc arvados.Service) *Proxy {
Server: srv,
URL: u,
}
+ var mtx sync.Mutex
rp.Director = func(r *http.Request) {
+ proxy.wg.Add(1)
+ defer proxy.wg.Done()
+ if proxy.Director != nil {
+ proxy.Director(r)
+ }
dump, _ := httputil.DumpRequest(r, true)
+ mtx.Lock()
proxy.RequestDumps = append(proxy.RequestDumps, dump)
+ mtx.Unlock()
r.URL.Scheme = target.Scheme
r.URL.Host = target.Host
}
return proxy
}
+
+// Wait waits until all of the proxied requests that have been sent to
+// Director() have also been recorded in RequestDumps.
+func (proxy *Proxy) Wait() {
+ proxy.wg.Wait()
+}
diff --git a/sdk/go/auth/auth.go b/sdk/go/auth/auth.go
index f1c2e243b5..da9b4ea5b8 100644
--- a/sdk/go/auth/auth.go
+++ b/sdk/go/auth/auth.go
@@ -54,13 +54,13 @@ func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
// Load plain token from "Authorization: OAuth2 ..." header
// (typically used by smart API clients)
if toks := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(toks) == 2 && (toks[0] == "OAuth2" || toks[0] == "Bearer") {
- a.Tokens = append(a.Tokens, toks[1])
+ a.Tokens = append(a.Tokens, strings.TrimSpace(toks[1]))
}
// Load base64-encoded token from "Authorization: Basic ..."
// header (typically used by git via credential helper)
if _, password, ok := r.BasicAuth(); ok {
- a.Tokens = append(a.Tokens, password)
+ a.Tokens = append(a.Tokens, strings.TrimSpace(password))
}
// Load tokens from query string. It's generally not a good
@@ -76,7 +76,9 @@ func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
// find/report decoding errors in a suitable way.
qvalues, _ := url.ParseQuery(r.URL.RawQuery)
if val, ok := qvalues["api_token"]; ok {
- a.Tokens = append(a.Tokens, val...)
+ for _, token := range val {
+ a.Tokens = append(a.Tokens, strings.TrimSpace(token))
+ }
}
a.loadTokenFromCookie(r)
@@ -94,7 +96,7 @@ func (a *Credentials) loadTokenFromCookie(r *http.Request) {
if err != nil {
return
}
- a.Tokens = append(a.Tokens, string(token))
+ a.Tokens = append(a.Tokens, strings.TrimSpace(string(token)))
}
// LoadTokensFromHTTPRequestBody loads credentials from the request
@@ -111,7 +113,7 @@ func (a *Credentials) LoadTokensFromHTTPRequestBody(r *http.Request) error {
return err
}
if t := r.PostFormValue("api_token"); t != "" {
- a.Tokens = append(a.Tokens, t)
+ a.Tokens = append(a.Tokens, strings.TrimSpace(t))
}
return nil
}
diff --git a/sdk/go/auth/handlers_test.go b/sdk/go/auth/handlers_test.go
index 362aeb7f04..85ea8893a5 100644
--- a/sdk/go/auth/handlers_test.go
+++ b/sdk/go/auth/handlers_test.go
@@ -7,6 +7,8 @@ package auth
import (
"net/http"
"net/http/httptest"
+ "net/url"
+ "strings"
"testing"
check "gopkg.in/check.v1"
@@ -32,9 +34,36 @@ func (s *HandlersSuite) SetUpTest(c *check.C) {
func (s *HandlersSuite) TestLoadToken(c *check.C) {
handler := LoadToken(s)
handler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/foo/bar?api_token=xyzzy", nil))
- c.Assert(s.gotCredentials, check.NotNil)
- c.Assert(s.gotCredentials.Tokens, check.HasLen, 1)
- c.Check(s.gotCredentials.Tokens[0], check.Equals, "xyzzy")
+ c.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{"xyzzy"})
+}
+
+// Ignore leading and trailing spaces, newlines, etc. in case a user
+// has added them inadvertently during copy/paste.
+func (s *HandlersSuite) TestTrimSpaceInQuery(c *check.C) {
+ handler := LoadToken(s)
+ handler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/foo/bar?api_token=%20xyzzy%0a", nil))
+ c.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{"xyzzy"})
+}
+func (s *HandlersSuite) TestTrimSpaceInPostForm(c *check.C) {
+ handler := LoadToken(s)
+ req := httptest.NewRequest("POST", "/foo/bar", strings.NewReader(url.Values{"api_token": []string{"\nxyzzy\n"}}.Encode()))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ handler.ServeHTTP(httptest.NewRecorder(), req)
+ c.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{"xyzzy"})
+}
+func (s *HandlersSuite) TestTrimSpaceInCookie(c *check.C) {
+ handler := LoadToken(s)
+ req := httptest.NewRequest("GET", "/foo/bar", nil)
+ req.AddCookie(&http.Cookie{Name: "arvados_api_token", Value: EncodeTokenCookie([]byte("\vxyzzy\n"))})
+ handler.ServeHTTP(httptest.NewRecorder(), req)
+ c.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{"xyzzy"})
+}
+func (s *HandlersSuite) TestTrimSpaceInBasicAuth(c *check.C) {
+ handler := LoadToken(s)
+ req := httptest.NewRequest("GET", "/foo/bar", nil)
+ req.SetBasicAuth("username", "\txyzzy\n")
+ handler.ServeHTTP(httptest.NewRecorder(), req)
+ c.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{"xyzzy"})
}
func (s *HandlersSuite) TestRequireLiteralTokenEmpty(c *check.C) {
@@ -76,4 +105,5 @@ func (s *HandlersSuite) TestRequireLiteralToken(c *check.C) {
func (s *HandlersSuite) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.served++
s.gotCredentials = CredentialsFromRequest(r)
+ s.gotCredentials.LoadTokensFromHTTPRequestBody(r)
}
diff --git a/sdk/go/health/aggregator.go b/sdk/go/health/aggregator.go
index b5301dffe0..3bf37b1294 100644
--- a/sdk/go/health/aggregator.go
+++ b/sdk/go/health/aggregator.go
@@ -135,6 +135,7 @@ type CheckResult struct {
Response map[string]interface{} `json:",omitempty"`
ResponseTime json.Number
ClockTime time.Time
+ Server string // "Server" header in http response
Metrics
respTime time.Duration
}
@@ -223,7 +224,8 @@ func (agg *Aggregator) ClusterHealth() ClusterHealthResponse {
for svcName, sh := range resp.Services {
switch svcName {
case arvados.ServiceNameDispatchCloud,
- arvados.ServiceNameDispatchLSF:
+ arvados.ServiceNameDispatchLSF,
+ arvados.ServiceNameDispatchSLURM:
// ok to not run any given dispatcher
case arvados.ServiceNameHealth,
arvados.ServiceNameWorkbench1,
@@ -359,6 +361,7 @@ func (agg *Aggregator) ping(target *url.URL) (result CheckResult) {
}
result.Health = "OK"
result.ClockTime, _ = time.Parse(time.RFC1123, resp.Header.Get("Date"))
+ result.Server = resp.Header.Get("Server")
return
}
@@ -437,7 +440,7 @@ func (ccmd checkCommand) RunCommand(prog string, args []string, stdin io.Reader,
err := ccmd.run(ctx, prog, args, stdin, stdout, stderr)
if err != nil {
if err != errSilent {
- fmt.Fprintln(stdout, err.Error())
+ fmt.Fprintln(stderr, err.Error())
}
return 1
}
@@ -451,7 +454,8 @@ func (ccmd checkCommand) run(ctx context.Context, prog string, args []string, st
loader.SetupFlags(flags)
versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
timeout := flags.Duration("timeout", defaultTimeout.Duration(), "Maximum time to wait for health responses")
- outputYAML := flags.Bool("yaml", false, "Output full health report in YAML format (default mode shows errors as plain text, is silent on success)")
+ quiet := flags.Bool("quiet", false, "Silent on success (suppress 'health check OK' message on stderr)")
+ outputYAML := flags.Bool("yaml", false, "Output full health report in YAML format (default mode prints 'health check OK' or plain text errors)")
if ok, _ := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
// cmd.ParseFlags already reported the error
return errSilent
@@ -486,11 +490,14 @@ func (ccmd checkCommand) run(ctx context.Context, prog string, args []string, st
}
if resp.Health != "OK" {
for _, msg := range resp.Errors {
- fmt.Fprintln(stdout, msg)
+ fmt.Fprintln(stderr, msg)
}
fmt.Fprintln(stderr, "health check failed")
return errSilent
}
+ if !*quiet {
+ fmt.Fprintln(stderr, "health check OK")
+ }
return nil
}
diff --git a/sdk/go/health/aggregator_test.go b/sdk/go/health/aggregator_test.go
index b1166c27d4..f76f7b8ea8 100644
--- a/sdk/go/health/aggregator_test.go
+++ b/sdk/go/health/aggregator_test.go
@@ -321,6 +321,13 @@ func (s *AggregatorSuite) TestCheckCommand(c *check.C) {
exitcode := CheckCommand.RunCommand("check", []string{"-config=" + tmpdir + "/config.yml"}, &bytes.Buffer{}, &stdout, &stderr)
c.Check(exitcode, check.Equals, 0)
+ c.Check(stderr.String(), check.Equals, "health check OK\n")
+ c.Check(stdout.String(), check.Equals, "")
+
+ stdout.Reset()
+ stderr.Reset()
+ exitcode = CheckCommand.RunCommand("check", []string{"-quiet", "-config=" + tmpdir + "/config.yml"}, &bytes.Buffer{}, &stdout, &stderr)
+ c.Check(exitcode, check.Equals, 0)
c.Check(stderr.String(), check.Equals, "")
c.Check(stdout.String(), check.Equals, "")
diff --git a/sdk/go/httpserver/error.go b/sdk/go/httpserver/error.go
index 75ff85336f..7a4233d6c6 100644
--- a/sdk/go/httpserver/error.go
+++ b/sdk/go/httpserver/error.go
@@ -10,6 +10,11 @@ import (
"net/http"
)
+type HTTPStatusError interface {
+ error
+ HTTPStatus() int
+}
+
func Errorf(status int, tmpl string, args ...interface{}) error {
return errorWithStatus{fmt.Errorf(tmpl, args...), status}
}
diff --git a/sdk/go/httpserver/request_limiter.go b/sdk/go/httpserver/request_limiter.go
index 8889453125..1e3316ed48 100644
--- a/sdk/go/httpserver/request_limiter.go
+++ b/sdk/go/httpserver/request_limiter.go
@@ -5,88 +5,323 @@
package httpserver
import (
+ "container/heap"
+ "math"
"net/http"
- "sync/atomic"
+ "sync"
+ "time"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
-// RequestCounter is an http.Handler that tracks the number of
-// requests in progress.
-type RequestCounter interface {
- http.Handler
+const MinPriority = math.MinInt64
- // Current() returns the number of requests in progress.
- Current() int
+// Prometheus typically polls every 10 seconds, but it doesn't cost us
+// much to also accommodate higher frequency collection by updating
+// internal stats more frequently. (This limits time resolution only
+// for the metrics that aren't generated on the fly.)
+const metricsUpdateInterval = time.Second
- // Max() returns the maximum number of concurrent requests
- // that will be accepted.
- Max() int
+// RequestLimiter wraps http.Handler, limiting the number of
+// concurrent requests being handled by the wrapped Handler. Requests
+// that arrive when the handler is already at the specified
+// concurrency limit are queued and handled in the order indicated by
+// the Priority function.
+//
+// Caller must not modify any RequestLimiter fields after calling its
+// methods.
+type RequestLimiter struct {
+ Handler http.Handler
+
+ // Queue determines which queue a request is assigned to.
+ Queue func(req *http.Request) *RequestQueue
+
+ // Priority determines queue ordering. Requests with higher
+ // priority are handled first. Requests with equal priority
+ // are handled FIFO. If Priority is nil, all requests are
+ // handled FIFO.
+ Priority func(req *http.Request, queued time.Time) int64
+
+ // "concurrent_requests", "max_concurrent_requests",
+ // "queued_requests", and "max_queued_requests" metrics are
+ // registered with Registry, if it is not nil.
+ Registry *prometheus.Registry
+
+ setupOnce sync.Once
+ mQueueDelay *prometheus.SummaryVec
+ mQueueTimeout *prometheus.SummaryVec
+ mQueueUsage *prometheus.GaugeVec
+ mtx sync.Mutex
+ rqs map[*RequestQueue]bool // all RequestQueues in use
+}
+
+type RequestQueue struct {
+ // Label for metrics. No two queues should have the same label.
+ Label string
+
+ // Maximum number of requests being handled at once. Beyond
+ // this limit, requests will be queued.
+ MaxConcurrent int
+
+ // Maximum number of requests in the queue. Beyond this limit,
+ // the lowest priority requests will return 503.
+ MaxQueue int
+
+ // Return 503 for any request for which Priority() returns
+ // MinPriority if it spends longer than this in the queue
+ // before starting processing.
+ MaxQueueTimeForMinPriority time.Duration
+
+ queue queue
+ handling int
}
-type limiterHandler struct {
- requests chan struct{}
- handler http.Handler
- count int64 // only used if cap(requests)==0
+type qent struct {
+ rq *RequestQueue
+ queued time.Time
+ priority int64
+ heappos int
+ ready chan bool // true = handle now; false = return 503 now
}
-// NewRequestLimiter returns a RequestCounter that delegates up to
-// maxRequests at a time to the given handler, and responds 503 to all
-// incoming requests beyond that limit.
-//
-// "concurrent_requests" and "max_concurrent_requests" metrics are
-// registered with the given reg, if reg is not nil.
-func NewRequestLimiter(maxRequests int, handler http.Handler, reg *prometheus.Registry) RequestCounter {
- h := &limiterHandler{
- requests: make(chan struct{}, maxRequests),
- handler: handler,
+type queue []*qent
+
+func (h queue) Swap(i, j int) {
+ h[i], h[j] = h[j], h[i]
+ h[i].heappos, h[j].heappos = i, j
+}
+
+func (h queue) Less(i, j int) bool {
+ pi, pj := h[i].priority, h[j].priority
+ return pi > pj || (pi == pj && h[i].queued.Before(h[j].queued))
+}
+
+func (h queue) Len() int {
+ return len(h)
+}
+
+func (h *queue) Push(x interface{}) {
+ n := len(*h)
+ ent := x.(*qent)
+ ent.heappos = n
+ *h = append(*h, ent)
+}
+
+func (h *queue) Pop() interface{} {
+ n := len(*h)
+ ent := (*h)[n-1]
+ ent.heappos = -1
+ (*h)[n-1] = nil
+ *h = (*h)[0 : n-1]
+ return ent
+}
+
+func (h *queue) add(ent *qent) {
+ ent.heappos = h.Len()
+ h.Push(ent)
+}
+
+func (h *queue) removeMax() *qent {
+ return heap.Pop(h).(*qent)
+}
+
+func (h *queue) remove(i int) {
+ heap.Remove(h, i)
+}
+
+func (rl *RequestLimiter) setup() {
+ if rl.Registry != nil {
+ mCurrentReqs := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: "concurrent_requests",
+ Help: "Number of requests in progress",
+ }, []string{"queue"})
+ rl.Registry.MustRegister(mCurrentReqs)
+ mMaxReqs := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: "max_concurrent_requests",
+ Help: "Maximum number of concurrent requests",
+ }, []string{"queue"})
+ rl.Registry.MustRegister(mMaxReqs)
+ mMaxQueue := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: "max_queued_requests",
+ Help: "Maximum number of queued requests",
+ }, []string{"queue"})
+ rl.Registry.MustRegister(mMaxQueue)
+ rl.mQueueUsage = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: "queued_requests",
+ Help: "Number of requests in queue",
+ }, []string{"queue", "priority"})
+ rl.Registry.MustRegister(rl.mQueueUsage)
+ rl.mQueueDelay = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: "arvados",
+ Name: "queue_delay_seconds",
+ Help: "Time spent in the incoming request queue before start of processing",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
+ }, []string{"queue", "priority"})
+ rl.Registry.MustRegister(rl.mQueueDelay)
+ rl.mQueueTimeout = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: "arvados",
+ Name: "queue_timeout_seconds",
+ Help: "Time spent in the incoming request queue before client timed out or disconnected",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
+ }, []string{"queue", "priority"})
+ rl.Registry.MustRegister(rl.mQueueTimeout)
+ go func() {
+ for range time.NewTicker(metricsUpdateInterval).C {
+ rl.mtx.Lock()
+ for rq := range rl.rqs {
+ var low, normal, high int
+ for _, ent := range rq.queue {
+ switch {
+ case ent.priority < 0:
+ low++
+ case ent.priority > 0:
+ high++
+ default:
+ normal++
+ }
+ }
+ mCurrentReqs.WithLabelValues(rq.Label).Set(float64(rq.handling))
+ mMaxReqs.WithLabelValues(rq.Label).Set(float64(rq.MaxConcurrent))
+ mMaxQueue.WithLabelValues(rq.Label).Set(float64(rq.MaxQueue))
+ rl.mQueueUsage.WithLabelValues(rq.Label, "low").Set(float64(low))
+ rl.mQueueUsage.WithLabelValues(rq.Label, "normal").Set(float64(normal))
+ rl.mQueueUsage.WithLabelValues(rq.Label, "high").Set(float64(high))
+ }
+ rl.mtx.Unlock()
+ }
+ }()
}
- if reg != nil {
- reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Name: "concurrent_requests",
- Help: "Number of requests in progress",
- },
- func() float64 { return float64(h.Current()) },
- ))
- reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Name: "max_concurrent_requests",
- Help: "Maximum number of concurrent requests",
- },
- func() float64 { return float64(h.Max()) },
- ))
+}
+
+// caller must have lock
+func (rq *RequestQueue) runqueue() {
+ // Handle entries from the queue as capacity permits
+ for len(rq.queue) > 0 && (rq.MaxConcurrent == 0 || rq.handling < rq.MaxConcurrent) {
+ rq.handling++
+ ent := rq.queue.removeMax()
+ ent.ready <- true
}
- return h
}
-func (h *limiterHandler) Current() int {
- if cap(h.requests) == 0 {
- return int(atomic.LoadInt64(&h.count))
+// If the queue is too full, fail and remove the lowest-priority
+// entry. Caller must have lock. Queue must not be empty.
+func (rq *RequestQueue) trimqueue() {
+ if len(rq.queue) <= rq.MaxQueue {
+ return
}
- return len(h.requests)
+ min := 0
+ for i := range rq.queue {
+ if i == 0 || rq.queue.Less(min, i) {
+ min = i
+ }
+ }
+ rq.queue[min].ready <- false
+ rq.queue.remove(min)
}
-func (h *limiterHandler) Max() int {
- return cap(h.requests)
+func (rl *RequestLimiter) enqueue(req *http.Request) *qent {
+ rl.mtx.Lock()
+ defer rl.mtx.Unlock()
+ qtime := time.Now()
+ var priority int64
+ if rl.Priority != nil {
+ priority = rl.Priority(req, qtime)
+ }
+ ent := &qent{
+ rq: rl.Queue(req),
+ queued: qtime,
+ priority: priority,
+ ready: make(chan bool, 1),
+ heappos: -1,
+ }
+ if rl.rqs == nil {
+ rl.rqs = map[*RequestQueue]bool{}
+ }
+ rl.rqs[ent.rq] = true
+ if ent.rq.MaxConcurrent == 0 || ent.rq.MaxConcurrent > ent.rq.handling {
+ // fast path, skip the queue
+ ent.rq.handling++
+ ent.ready <- true
+ return ent
+ }
+ ent.rq.queue.add(ent)
+ ent.rq.trimqueue()
+ return ent
}
-func (h *limiterHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
- if cap(h.requests) == 0 {
- atomic.AddInt64(&h.count, 1)
- defer atomic.AddInt64(&h.count, -1)
- h.handler.ServeHTTP(resp, req)
- return
+func (rl *RequestLimiter) remove(ent *qent) {
+ rl.mtx.Lock()
+ defer rl.mtx.Unlock()
+ if ent.heappos >= 0 {
+ ent.rq.queue.remove(ent.heappos)
+ ent.ready <- false
+ }
+}
+
+func (rl *RequestLimiter) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ rl.setupOnce.Do(rl.setup)
+ ent := rl.enqueue(req)
+ SetResponseLogFields(req.Context(), logrus.Fields{"priority": ent.priority, "queue": ent.rq.Label})
+ if ent.priority == MinPriority {
+		// Note that MaxQueueTimeForMinPriority==0 does not cancel a req
+ // that skips the queue, because in that case
+ // rl.enqueue() has already fired ready<-true and
+ // rl.remove() is a no-op.
+ go func() {
+ time.Sleep(ent.rq.MaxQueueTimeForMinPriority)
+ rl.remove(ent)
+ }()
}
+ var ok bool
select {
- case h.requests <- struct{}{}:
- default:
- // reached max requests
+ case <-req.Context().Done():
+ rl.remove(ent)
+ // we still need to wait for ent.ready, because
+ // sometimes runqueue() will have already decided to
+ // send true before our rl.remove() call, and in that
+ // case we'll need to decrement ent.rq.handling below.
+ ok = <-ent.ready
+ case ok = <-ent.ready:
+ }
+
+ // Report time spent in queue in the appropriate bucket:
+ // mQueueDelay if the request actually got processed,
+ // mQueueTimeout if it was abandoned or cancelled before
+ // getting a processing slot.
+ var series *prometheus.SummaryVec
+ if ok {
+ series = rl.mQueueDelay
+ } else {
+ series = rl.mQueueTimeout
+ }
+ if series != nil {
+ var qlabel string
+ switch {
+ case ent.priority < 0:
+ qlabel = "low"
+ case ent.priority > 0:
+ qlabel = "high"
+ default:
+ qlabel = "normal"
+ }
+ series.WithLabelValues(ent.rq.Label, qlabel).Observe(time.Now().Sub(ent.queued).Seconds())
+ }
+
+ if !ok {
resp.WriteHeader(http.StatusServiceUnavailable)
return
}
- h.handler.ServeHTTP(resp, req)
- <-h.requests
+ defer func() {
+ rl.mtx.Lock()
+ defer rl.mtx.Unlock()
+ ent.rq.handling--
+ // unblock the next waiting request
+ ent.rq.runqueue()
+ }()
+ rl.Handler.ServeHTTP(resp, req)
}
diff --git a/sdk/go/httpserver/request_limiter_test.go b/sdk/go/httpserver/request_limiter_test.go
index 9258fbfa58..7366e1426b 100644
--- a/sdk/go/httpserver/request_limiter_test.go
+++ b/sdk/go/httpserver/request_limiter_test.go
@@ -5,11 +5,14 @@
package httpserver
import (
+ "fmt"
"net/http"
"net/http/httptest"
+ "strconv"
"sync"
- "testing"
"time"
+
+ check "gopkg.in/check.v1"
)
type testHandler struct {
@@ -29,9 +32,13 @@ func newTestHandler() *testHandler {
}
}
-func TestRequestLimiter1(t *testing.T) {
+func (s *Suite) TestRequestLimiter1(c *check.C) {
h := newTestHandler()
- l := NewRequestLimiter(1, h, nil)
+ rq := &RequestQueue{
+ MaxConcurrent: 1}
+ l := RequestLimiter{
+ Queue: func(*http.Request) *RequestQueue { return rq },
+ Handler: h}
var wg sync.WaitGroup
resps := make([]*httptest.ResponseRecorder, 10)
for i := 0; i < 10; i++ {
@@ -59,7 +66,7 @@ func TestRequestLimiter1(t *testing.T) {
select {
case <-done:
case <-time.After(10 * time.Second):
- t.Fatal("test timed out, probably deadlocked")
+ c.Fatal("test timed out, probably deadlocked")
}
n200 := 0
n503 := 0
@@ -70,11 +77,11 @@ func TestRequestLimiter1(t *testing.T) {
case 503:
n503++
default:
- t.Fatalf("Unexpected response code %d", resps[i].Code)
+ c.Fatalf("Unexpected response code %d", resps[i].Code)
}
}
if n200 != 1 || n503 != 9 {
- t.Fatalf("Got %d 200 responses, %d 503 responses (expected 1, 9)", n200, n503)
+ c.Fatalf("Got %d 200 responses, %d 503 responses (expected 1, 9)", n200, n503)
}
// Now that all 10 are finished, an 11th request should
// succeed.
@@ -85,13 +92,17 @@ func TestRequestLimiter1(t *testing.T) {
resp := httptest.NewRecorder()
l.ServeHTTP(resp, &http.Request{})
if resp.Code != 200 {
- t.Errorf("Got status %d on 11th request, want 200", resp.Code)
+ c.Errorf("Got status %d on 11th request, want 200", resp.Code)
}
}
-func TestRequestLimiter10(t *testing.T) {
+func (*Suite) TestRequestLimiter10(c *check.C) {
h := newTestHandler()
- l := NewRequestLimiter(10, h, nil)
+ rq := &RequestQueue{
+ MaxConcurrent: 10}
+ l := RequestLimiter{
+ Queue: func(*http.Request) *RequestQueue { return rq },
+ Handler: h}
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
@@ -108,3 +119,99 @@ func TestRequestLimiter10(t *testing.T) {
}
wg.Wait()
}
+
+func (*Suite) TestRequestLimiterQueuePriority(c *check.C) {
+ h := newTestHandler()
+ rq := &RequestQueue{
+ MaxConcurrent: 1000,
+ MaxQueue: 200,
+ }
+ rl := RequestLimiter{
+ Handler: h,
+ Queue: func(*http.Request) *RequestQueue { return rq },
+ Priority: func(r *http.Request, _ time.Time) int64 {
+ p, _ := strconv.ParseInt(r.Header.Get("Priority"), 10, 64)
+ return p
+ }}
+
+ c.Logf("starting initial requests")
+ for i := 0; i < rq.MaxConcurrent; i++ {
+ go func() {
+ rl.ServeHTTP(httptest.NewRecorder(), &http.Request{Header: http.Header{"No-Priority": {"x"}}})
+ }()
+ }
+ c.Logf("waiting for initial requests to consume all MaxConcurrent slots")
+ for i := 0; i < rq.MaxConcurrent; i++ {
+ <-h.inHandler
+ }
+
+ c.Logf("starting %d priority=MinPriority requests (should respond 503 immediately)", rq.MaxQueue)
+ var wgX sync.WaitGroup
+ for i := 0; i < rq.MaxQueue; i++ {
+ wgX.Add(1)
+ go func() {
+ defer wgX.Done()
+ resp := httptest.NewRecorder()
+ rl.ServeHTTP(resp, &http.Request{Header: http.Header{"Priority": {fmt.Sprintf("%d", MinPriority)}}})
+ c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+ }()
+ }
+ wgX.Wait()
+
+ c.Logf("starting %d priority=MinPriority requests (should respond 503 after 100 ms)", rq.MaxQueue)
+ // Usage docs say the caller isn't allowed to change fields
+ // after first use, but we secretly know it's OK to change
+ // this field on the fly as long as no requests are arriving
+ // concurrently.
+ rq.MaxQueueTimeForMinPriority = time.Millisecond * 100
+ for i := 0; i < rq.MaxQueue; i++ {
+ wgX.Add(1)
+ go func() {
+ defer wgX.Done()
+ resp := httptest.NewRecorder()
+ t0 := time.Now()
+ rl.ServeHTTP(resp, &http.Request{Header: http.Header{"Priority": {fmt.Sprintf("%d", MinPriority)}}})
+ c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+ elapsed := time.Since(t0)
+ c.Check(elapsed > rq.MaxQueueTimeForMinPriority, check.Equals, true)
+ c.Check(elapsed < rq.MaxQueueTimeForMinPriority*10, check.Equals, true)
+ }()
+ }
+ wgX.Wait()
+
+	c.Logf("starting %d priority=1 and %d priority=2 requests", rq.MaxQueue, rq.MaxQueue)
+ var wg1, wg2 sync.WaitGroup
+ wg1.Add(rq.MaxQueue)
+ wg2.Add(rq.MaxQueue)
+ for i := 0; i < rq.MaxQueue*2; i++ {
+ i := i
+ go func() {
+ pri := (i & 1) + 1
+ resp := httptest.NewRecorder()
+ rl.ServeHTTP(resp, &http.Request{Header: http.Header{"Priority": {fmt.Sprintf("%d", pri)}}})
+ if pri == 1 {
+ c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+ wg1.Done()
+ } else {
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ wg2.Done()
+ }
+ }()
+ }
+
+ c.Logf("waiting for queued priority=1 requests to fail")
+ wg1.Wait()
+
+ c.Logf("allowing initial requests to proceed")
+ for i := 0; i < rq.MaxConcurrent; i++ {
+ h.okToProceed <- struct{}{}
+ }
+
+ c.Logf("allowing queued priority=2 requests to proceed")
+ for i := 0; i < rq.MaxQueue; i++ {
+ <-h.inHandler
+ h.okToProceed <- struct{}{}
+ }
+ c.Logf("waiting for queued priority=2 requests to succeed")
+ wg2.Wait()
+}
diff --git a/sdk/go/keepclient/block_cache.go b/sdk/go/keepclient/block_cache.go
deleted file mode 100644
index bac4a24fd5..0000000000
--- a/sdk/go/keepclient/block_cache.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package keepclient
-
-import (
- "io"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-var DefaultBlockCache = &BlockCache{}
-
-type BlockCache struct {
- // Maximum number of blocks to keep in the cache. If 0, a
- // default size (currently 4) is used instead.
- MaxBlocks int
-
- cache map[string]*cacheBlock
- mtx sync.Mutex
-}
-
-const defaultMaxBlocks = 4
-
-// Sweep deletes the least recently used blocks from the cache until
-// there are no more than MaxBlocks left.
-func (c *BlockCache) Sweep() {
- max := c.MaxBlocks
- if max == 0 {
- max = defaultMaxBlocks
- }
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if len(c.cache) <= max {
- return
- }
- lru := make([]time.Time, 0, len(c.cache))
- for _, b := range c.cache {
- lru = append(lru, b.lastUse)
- }
- sort.Sort(sort.Reverse(timeSlice(lru)))
- threshold := lru[max]
- for loc, b := range c.cache {
- if !b.lastUse.After(threshold) {
- delete(c.cache, loc)
- }
- }
-}
-
-// ReadAt returns data from the cache, first retrieving it from Keep if
-// necessary.
-func (c *BlockCache) ReadAt(kc *KeepClient, locator string, p []byte, off int) (int, error) {
- buf, err := c.Get(kc, locator)
- if err != nil {
- return 0, err
- }
- if off > len(buf) {
- return 0, io.ErrUnexpectedEOF
- }
- return copy(p, buf[off:]), nil
-}
-
-// Get returns data from the cache, first retrieving it from Keep if
-// necessary.
-func (c *BlockCache) Get(kc *KeepClient, locator string) ([]byte, error) {
- cacheKey := locator[:32]
- bufsize := BLOCKSIZE
- if parts := strings.SplitN(locator, "+", 3); len(parts) >= 2 {
- datasize, err := strconv.ParseInt(parts[1], 10, 32)
- if err == nil && datasize >= 0 {
- bufsize = int(datasize)
- }
- }
- c.mtx.Lock()
- if c.cache == nil {
- c.cache = make(map[string]*cacheBlock)
- }
- b, ok := c.cache[cacheKey]
- if !ok || b.err != nil {
- b = &cacheBlock{
- fetched: make(chan struct{}),
- lastUse: time.Now(),
- }
- c.cache[cacheKey] = b
- go func() {
- rdr, size, _, err := kc.Get(locator)
- var data []byte
- if err == nil {
- data = make([]byte, size, bufsize)
- _, err = io.ReadFull(rdr, data)
- err2 := rdr.Close()
- if err == nil {
- err = err2
- }
- }
- c.mtx.Lock()
- b.data, b.err = data, err
- c.mtx.Unlock()
- close(b.fetched)
- go c.Sweep()
- }()
- }
- c.mtx.Unlock()
-
- // Wait (with mtx unlocked) for the fetch goroutine to finish,
- // in case it hasn't already.
- <-b.fetched
-
- c.mtx.Lock()
- b.lastUse = time.Now()
- c.mtx.Unlock()
- return b.data, b.err
-}
-
-func (c *BlockCache) Clear() {
- c.mtx.Lock()
- c.cache = nil
- c.mtx.Unlock()
-}
-
-type timeSlice []time.Time
-
-func (ts timeSlice) Len() int { return len(ts) }
-
-func (ts timeSlice) Less(i, j int) bool { return ts[i].Before(ts[j]) }
-
-func (ts timeSlice) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
-
-type cacheBlock struct {
- data []byte
- err error
- fetched chan struct{}
- lastUse time.Time
-}
diff --git a/sdk/go/keepclient/collectionreader_test.go b/sdk/go/keepclient/collectionreader_test.go
index 75603f1baa..c1bad8557d 100644
--- a/sdk/go/keepclient/collectionreader_test.go
+++ b/sdk/go/keepclient/collectionreader_test.go
@@ -237,7 +237,9 @@ func (s *CollectionReaderUnit) TestCollectionReaderManyBlocks(c *check.C) {
}
func (s *CollectionReaderUnit) TestCollectionReaderCloseEarly(c *check.C) {
- s.kc.BlockCache = &BlockCache{}
+ // Disable cache
+ s.kc.gatewayStack = &keepViaHTTP{s.kc}
+
s.kc.PutB([]byte("foo"))
s.kc.PutB([]byte("bar"))
s.kc.PutB([]byte("baz"))
diff --git a/sdk/go/keepclient/gateway_shim.go b/sdk/go/keepclient/gateway_shim.go
new file mode 100644
index 0000000000..260824453d
--- /dev/null
+++ b/sdk/go/keepclient/gateway_shim.go
@@ -0,0 +1,78 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// keepViaHTTP implements arvados.KeepGateway by using a KeepClient to
+// do upstream requests to keepstore and keepproxy.
+//
+// This enables KeepClient to use KeepGateway wrappers (like
+// arvados.DiskCache) to wrap its own HTTP client back-end methods
+// (getOrHead, httpBlockWrite).
+//
+// See (*KeepClient)upstreamGateway() for the relevant glue.
+type keepViaHTTP struct {
+ *KeepClient
+}
+
+func (kvh *keepViaHTTP) ReadAt(locator string, dst []byte, offset int) (int, error) {
+ rdr, _, _, _, err := kvh.getOrHead("GET", locator, nil)
+ if err != nil {
+ return 0, err
+ }
+ defer rdr.Close()
+ _, err = io.CopyN(io.Discard, rdr, int64(offset))
+ if err != nil {
+ return 0, err
+ }
+ n, err := rdr.Read(dst)
+ return int(n), err
+}
+
+func (kvh *keepViaHTTP) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
+ rdr, _, _, _, err := kvh.getOrHead("GET", opts.Locator, nil)
+ if err != nil {
+ return 0, err
+ }
+ n, err := io.Copy(opts.WriteTo, rdr)
+ errClose := rdr.Close()
+ if err == nil {
+ err = errClose
+ }
+ return int(n), err
+}
+
+func (kvh *keepViaHTTP) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+ return kvh.httpBlockWrite(ctx, req)
+}
+
+func (kvh *keepViaHTTP) LocalLocator(locator string) (string, error) {
+ if !strings.Contains(locator, "+R") {
+ // Either it has +A, or it's unsigned and we assume
+ // it's a local locator on a site with signatures
+ // disabled.
+ return locator, nil
+ }
+ sighdr := fmt.Sprintf("local, time=%s", time.Now().UTC().Format(time.RFC3339))
+ _, _, url, hdr, err := kvh.KeepClient.getOrHead("HEAD", locator, http.Header{"X-Keep-Signature": []string{sighdr}})
+ if err != nil {
+ return "", err
+ }
+ loc := hdr.Get("X-Keep-Locator")
+ if loc == "" {
+ return "", fmt.Errorf("missing X-Keep-Locator header in HEAD response from %s", url)
+ }
+ return loc, nil
+}
diff --git a/sdk/go/keepclient/hashcheck.go b/sdk/go/keepclient/hashcheck.go
index 0966e072ea..f1d5c6ccce 100644
--- a/sdk/go/keepclient/hashcheck.go
+++ b/sdk/go/keepclient/hashcheck.go
@@ -47,12 +47,7 @@ func (hcr HashCheckingReader) Read(p []byte) (n int, err error) {
// BadChecksum if writing is successful but the checksum doesn't
// match.
func (hcr HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
- if writeto, ok := hcr.Reader.(io.WriterTo); ok {
- written, err = writeto.WriteTo(io.MultiWriter(dest, hcr.Hash))
- } else {
- written, err = io.Copy(io.MultiWriter(dest, hcr.Hash), hcr.Reader)
- }
-
+ written, err = io.Copy(io.MultiWriter(dest, hcr.Hash), hcr.Reader)
if err != nil {
return written, err
}
diff --git a/sdk/go/keepclient/keepclient.go b/sdk/go/keepclient/keepclient.go
index 68ac886ddd..d97a2d1fcd 100644
--- a/sdk/go/keepclient/keepclient.go
+++ b/sdk/go/keepclient/keepclient.go
@@ -7,6 +7,7 @@
package keepclient
import (
+ "bufio"
"bytes"
"context"
"crypto/md5"
@@ -16,6 +17,8 @@ import (
"io/ioutil"
"net"
"net/http"
+ "os"
+ "path/filepath"
"regexp"
"strconv"
"strings"
@@ -40,6 +43,12 @@ var (
DefaultProxyConnectTimeout = 30 * time.Second
DefaultProxyTLSHandshakeTimeout = 10 * time.Second
DefaultProxyKeepAlive = 120 * time.Second
+
+ DefaultRetryDelay = 2 * time.Second // see KeepClient.RetryDelay
+ MinimumRetryDelay = time.Millisecond
+
+ rootCacheDir = "/var/cache/arvados/keep"
+ userCacheDir = ".cache/arvados/keep" // relative to HOME
)
// Error interface with an error and boolean indicating whether the error is temporary
@@ -69,6 +78,8 @@ type ErrNotFound struct {
multipleResponseError
}
+func (*ErrNotFound) HTTPStatus() int { return http.StatusNotFound }
+
type InsufficientReplicasError struct{ error }
type OversizeBlockError struct{ error }
@@ -95,20 +106,33 @@ type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
+const DiskCacheDisabled = arvados.ByteSizeOrPercent(1)
+
// KeepClient holds information about Arvados and Keep servers.
type KeepClient struct {
- Arvados *arvadosclient.ArvadosClient
- Want_replicas int
- localRoots map[string]string
- writableLocalRoots map[string]string
- gatewayRoots map[string]string
- lock sync.RWMutex
- HTTPClient HTTPClient
- Retries int
- BlockCache *BlockCache
+ Arvados *arvadosclient.ArvadosClient
+ Want_replicas int
+ localRoots map[string]string
+ writableLocalRoots map[string]string
+ gatewayRoots map[string]string
+ lock sync.RWMutex
+ HTTPClient HTTPClient
+
+ // Number of times to automatically retry a read/write
+ // operation after a transient failure.
+ Retries int
+
+ // Initial maximum delay for automatic retry. If zero,
+ // DefaultRetryDelay is used. The delay after attempt N
+ // (0-based) will be a random duration between
+ // MinimumRetryDelay and RetryDelay * 2^N, not to exceed a cap
+ // of RetryDelay * 10.
+ RetryDelay time.Duration
+
RequestID string
StorageClasses []string
- DefaultStorageClasses []string // Set by cluster's exported config
+ DefaultStorageClasses []string // Set by cluster's exported config
+ DiskCacheSize arvados.ByteSizeOrPercent // See also DiskCacheDisabled
// set to 1 if all writable services are of disk type, otherwise 0
replicasPerService int
@@ -118,6 +142,30 @@ type KeepClient struct {
// Disable automatic discovery of keep services
disableDiscovery bool
+
+ gatewayStack arvados.KeepGateway
+}
+
+func (kc *KeepClient) Clone() *KeepClient {
+ kc.lock.Lock()
+ defer kc.lock.Unlock()
+ return &KeepClient{
+ Arvados: kc.Arvados,
+ Want_replicas: kc.Want_replicas,
+ localRoots: kc.localRoots,
+ writableLocalRoots: kc.writableLocalRoots,
+ gatewayRoots: kc.gatewayRoots,
+ HTTPClient: kc.HTTPClient,
+ Retries: kc.Retries,
+ RetryDelay: kc.RetryDelay,
+ RequestID: kc.RequestID,
+ StorageClasses: kc.StorageClasses,
+ DefaultStorageClasses: kc.DefaultStorageClasses,
+ DiskCacheSize: kc.DiskCacheSize,
+ replicasPerService: kc.replicasPerService,
+ foundNonDiskSvc: kc.foundNonDiskSvc,
+ disableDiscovery: kc.disableDiscovery,
+ }
}
func (kc *KeepClient) loadDefaultClasses() error {
@@ -238,6 +286,7 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
var errs []string
+ delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
triesRemaining := 1 + kc.Retries
serversToTry := kc.getSortedRoots(locator)
@@ -317,6 +366,9 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
return nil, expectLength, url, resp.Header, nil
}
serversToTry = retryList
+ if len(serversToTry) > 0 && triesRemaining > 0 {
+ time.Sleep(delay.Next())
+ }
}
DebugPrintf("DEBUG: %s %s failed: %v", method, locator, errs)
@@ -332,44 +384,123 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
return nil, 0, "", nil, err
}
+// attempt to create dir/subdir/ and its parents, up to but not
+// including dir itself, using mode 0700.
+func makedirs(dir, subdir string) {
+ for _, part := range strings.Split(subdir, string(os.PathSeparator)) {
+ dir = filepath.Join(dir, part)
+ os.Mkdir(dir, 0700)
+ }
+}
+
+// upstreamGateway creates/returns the KeepGateway stack used to read
+// and write data: a disk-backed cache on top of an http backend.
+func (kc *KeepClient) upstreamGateway() arvados.KeepGateway {
+ kc.lock.Lock()
+ defer kc.lock.Unlock()
+ if kc.gatewayStack != nil {
+ return kc.gatewayStack
+ }
+ var cachedir string
+ if os.Geteuid() == 0 {
+ cachedir = rootCacheDir
+ makedirs("/", cachedir)
+ } else {
+ home := "/" + os.Getenv("HOME")
+ makedirs(home, userCacheDir)
+ cachedir = filepath.Join(home, userCacheDir)
+ }
+ backend := &keepViaHTTP{kc}
+ if kc.DiskCacheSize == DiskCacheDisabled {
+ kc.gatewayStack = backend
+ } else {
+ kc.gatewayStack = &arvados.DiskCache{
+ Dir: cachedir,
+ MaxSize: kc.DiskCacheSize,
+ KeepGateway: backend,
+ }
+ }
+ return kc.gatewayStack
+}
+
// LocalLocator returns a locator equivalent to the one supplied, but
// with a valid signature from the local cluster. If the given locator
// already has a local signature, it is returned unchanged.
func (kc *KeepClient) LocalLocator(locator string) (string, error) {
- if !strings.Contains(locator, "+R") {
- // Either it has +A, or it's unsigned and we assume
- // it's a local locator on a site with signatures
- // disabled.
- return locator, nil
- }
- sighdr := fmt.Sprintf("local, time=%s", time.Now().UTC().Format(time.RFC3339))
- _, _, url, hdr, err := kc.getOrHead("HEAD", locator, http.Header{"X-Keep-Signature": []string{sighdr}})
- if err != nil {
- return "", err
- }
- loc := hdr.Get("X-Keep-Locator")
- if loc == "" {
- return "", fmt.Errorf("missing X-Keep-Locator header in HEAD response from %s", url)
- }
- return loc, nil
+ return kc.upstreamGateway().LocalLocator(locator)
}
-// Get retrieves a block, given a locator. Returns a reader, the
-// expected data length, the URL the block is being fetched from, and
-// an error.
+// Get retrieves the specified block from the local cache or a backend
+// server. Returns a reader, the expected data length (or -1 if not
+// known), and an error.
+//
+// The third return value (a source URL in previous versions)
+// is an empty string.
//
// If the block checksum does not match, the final Read() on the
// reader returned by this method will return a BadChecksum error
// instead of EOF.
+//
+// New code should use BlockRead and/or ReadAt instead of Get.
func (kc *KeepClient) Get(locator string) (io.ReadCloser, int64, string, error) {
- rdr, size, url, _, err := kc.getOrHead("GET", locator, nil)
- return rdr, size, url, err
+ loc, err := MakeLocator(locator)
+ if err != nil {
+ return nil, 0, "", err
+ }
+ pr, pw := io.Pipe()
+ go func() {
+ n, err := kc.BlockRead(context.Background(), arvados.BlockReadOptions{
+ Locator: locator,
+ WriteTo: pw,
+ })
+ if err != nil {
+ pw.CloseWithError(err)
+ } else if loc.Size >= 0 && n != loc.Size {
+ pw.CloseWithError(fmt.Errorf("expected block size %d but read %d bytes", loc.Size, n))
+ } else {
+ pw.Close()
+ }
+ }()
+ // Wait for the first byte to arrive, so that, if there's an
+ // error before we receive any data, we can return the error
+ // directly, instead of indirectly via a reader that returns
+ // an error.
+ bufr := bufio.NewReader(pr)
+ _, err = bufr.Peek(1)
+ if err != nil && err != io.EOF {
+ pr.CloseWithError(err)
+ return nil, 0, "", err
+ }
+ if err == io.EOF && (loc.Size == 0 || loc.Hash == "d41d8cd98f00b204e9800998ecf8427e") {
+ // In the special case of the zero-length block, EOF
+ // error from Peek() is normal.
+ return pr, 0, "", nil
+ }
+ return struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: bufr,
+ Closer: pr,
+ }, int64(loc.Size), "", err
+}
+
+// BlockRead retrieves a block from the cache if it's present, otherwise
+// from the network.
+func (kc *KeepClient) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
+ return kc.upstreamGateway().BlockRead(ctx, opts)
}
// ReadAt retrieves a portion of block from the cache if it's
// present, otherwise from the network.
func (kc *KeepClient) ReadAt(locator string, p []byte, off int) (int, error) {
- return kc.cache().ReadAt(kc, locator, p, off)
+ return kc.upstreamGateway().ReadAt(locator, p, off)
+}
+
+// BlockWrite writes a full block to upstream servers and saves a copy
+// in the local cache.
+func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+ return kc.upstreamGateway().BlockWrite(ctx, req)
}
// Ask verifies that a block with the given hash is available and
@@ -511,17 +642,6 @@ func (kc *KeepClient) getSortedRoots(locator string) []string {
return found
}
-func (kc *KeepClient) cache() *BlockCache {
- if kc.BlockCache != nil {
- return kc.BlockCache
- }
- return DefaultBlockCache
-}
-
-func (kc *KeepClient) ClearBlockCache() {
- kc.cache().Clear()
-}
-
func (kc *KeepClient) SetStorageClasses(sc []string) {
// make a copy so the caller can't mess with it.
kc.StorageClasses = append([]string{}, sc...)
diff --git a/sdk/go/keepclient/keepclient_test.go b/sdk/go/keepclient/keepclient_test.go
index a6e0a11d51..531db31b25 100644
--- a/sdk/go/keepclient/keepclient_test.go
+++ b/sdk/go/keepclient/keepclient_test.go
@@ -17,6 +17,7 @@ import (
"os"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -26,8 +27,8 @@ import (
. "gopkg.in/check.v1"
)
-// Gocheck boilerplate
func Test(t *testing.T) {
+ DefaultRetryDelay = 50 * time.Millisecond
TestingT(t)
}
@@ -39,10 +40,25 @@ var _ = Suite(&StandaloneSuite{})
type ServerRequiredSuite struct{}
// Standalone tests
-type StandaloneSuite struct{}
+type StandaloneSuite struct {
+ origDefaultRetryDelay time.Duration
+ origMinimumRetryDelay time.Duration
+}
+
+var origHOME = os.Getenv("HOME")
func (s *StandaloneSuite) SetUpTest(c *C) {
RefreshServiceDiscovery()
+ // Prevent cache state from leaking between test cases
+ os.Setenv("HOME", c.MkDir())
+ s.origDefaultRetryDelay = DefaultRetryDelay
+ s.origMinimumRetryDelay = MinimumRetryDelay
+}
+
+func (s *StandaloneSuite) TearDownTest(c *C) {
+ os.Setenv("HOME", origHOME)
+ DefaultRetryDelay = s.origDefaultRetryDelay
+ MinimumRetryDelay = s.origMinimumRetryDelay
}
func pythonDir() string {
@@ -56,19 +72,22 @@ func (s *ServerRequiredSuite) SetUpSuite(c *C) {
func (s *ServerRequiredSuite) TearDownSuite(c *C) {
arvadostest.StopKeep(2)
+ os.Setenv("HOME", origHOME)
}
func (s *ServerRequiredSuite) SetUpTest(c *C) {
RefreshServiceDiscovery()
+ // Prevent cache state from leaking between test cases
+ os.Setenv("HOME", c.MkDir())
}
func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
kc, err := MakeKeepClient(arv)
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
c.Check(len(kc.LocalRoots()), Equals, 2)
for _, root := range kc.LocalRoots() {
c.Check(root, Matches, "http://localhost:\\d+")
@@ -129,7 +148,7 @@ func (sph *StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request
sph.c.Check(req.Header.Get("X-Keep-Storage-Classes"), Equals, sph.expectStorageClass)
}
body, err := ioutil.ReadAll(req.Body)
- sph.c.Check(err, Equals, nil)
+ sph.c.Check(err, IsNil)
sph.c.Check(body, DeepEquals, []byte(sph.expectBody))
resp.Header().Set("X-Keep-Replicas-Stored", "1")
if sph.returnStorageClasses != "" {
@@ -410,17 +429,17 @@ func (fh FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
}
type FailThenSucceedHandler struct {
+ morefails int // fail 1 + this many times before succeeding
handled chan string
- count int
+ count atomic.Int64
successhandler http.Handler
reqIDs []string
}
func (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
- if fh.count == 0 {
+ if int(fh.count.Add(1)) <= fh.morefails+1 {
resp.WriteHeader(500)
- fh.count++
fh.handled <- fmt.Sprintf("http://%s", req.Host)
} else {
fh.successhandler.ServeHTTP(resp, req)
@@ -549,14 +568,7 @@ func (s *StandaloneSuite) TestPutHR(c *C) {
kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
- reader, writer := io.Pipe()
-
- go func() {
- writer.Write([]byte("foo"))
- writer.Close()
- }()
-
- kc.PutHR(hash, reader, 3)
+ kc.PutHR(hash, bytes.NewBuffer([]byte("foo")), 3)
shuff := NewRootSorter(kc.LocalRoots(), hash).GetSortedRoots()
@@ -618,7 +630,7 @@ func (s *StandaloneSuite) TestPutWithFail(c *C) {
<-fh.handled
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(phash, Equals, "")
c.Check(replicas, Equals, 2)
@@ -697,7 +709,7 @@ func (sgh StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request)
}
func (s *StandaloneSuite) TestGet(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := StubGetHandler{
c,
@@ -715,19 +727,18 @@ func (s *StandaloneSuite) TestGet(c *C) {
arv.ApiToken = "abc123"
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
- r, n, url2, err := kc.Get(hash)
- defer r.Close()
- c.Check(err, Equals, nil)
+ r, n, _, err := kc.Get(hash)
+ c.Assert(err, IsNil)
c.Check(n, Equals, int64(3))
- c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
content, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
+ c.Check(err2, IsNil)
c.Check(content, DeepEquals, []byte("foo"))
+ c.Check(r.Close(), IsNil)
}
func (s *StandaloneSuite) TestGet404(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := Error404Handler{make(chan string, 1)}
@@ -740,11 +751,10 @@ func (s *StandaloneSuite) TestGet404(c *C) {
arv.ApiToken = "abc123"
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
- r, n, url2, err := kc.Get(hash)
+ r, n, _, err := kc.Get(hash)
c.Check(err, Equals, BlockNotFound)
c.Check(n, Equals, int64(0))
- c.Check(url2, Equals, "")
- c.Check(r, Equals, nil)
+ c.Check(r, IsNil)
}
func (s *StandaloneSuite) TestGetEmptyBlock(c *C) {
@@ -759,18 +769,18 @@ func (s *StandaloneSuite) TestGetEmptyBlock(c *C) {
arv.ApiToken = "abc123"
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
- r, n, url2, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e+0")
+ r, n, _, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e+0")
c.Check(err, IsNil)
c.Check(n, Equals, int64(0))
- c.Check(url2, Equals, "")
c.Assert(r, NotNil)
buf, err := ioutil.ReadAll(r)
c.Check(err, IsNil)
c.Check(buf, DeepEquals, []byte{})
+ c.Check(r.Close(), IsNil)
}
func (s *StandaloneSuite) TestGetFail(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := FailHandler{make(chan string, 1)}
@@ -784,57 +794,84 @@ func (s *StandaloneSuite) TestGetFail(c *C) {
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
kc.Retries = 0
- r, n, url2, err := kc.Get(hash)
+ r, n, _, err := kc.Get(hash)
errNotFound, _ := err.(*ErrNotFound)
- c.Check(errNotFound, NotNil)
- c.Check(strings.Contains(errNotFound.Error(), "HTTP 500"), Equals, true)
- c.Check(errNotFound.Temporary(), Equals, true)
+ if c.Check(errNotFound, NotNil) {
+ c.Check(strings.Contains(errNotFound.Error(), "HTTP 500"), Equals, true)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ }
c.Check(n, Equals, int64(0))
- c.Check(url2, Equals, "")
- c.Check(r, Equals, nil)
+ c.Check(r, IsNil)
}
func (s *StandaloneSuite) TestGetFailRetry(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
-
- st := &FailThenSucceedHandler{
- handled: make(chan string, 1),
- successhandler: StubGetHandler{
- c,
- hash,
- "abc123",
- http.StatusOK,
- []byte("foo")}}
-
- ks := RunFakeKeepServer(st)
- defer ks.listener.Close()
-
- arv, err := arvadosclient.MakeArvadosClient()
- c.Check(err, IsNil)
- kc, _ := MakeKeepClient(arv)
- arv.ApiToken = "abc123"
- kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
-
- r, n, url2, err := kc.Get(hash)
- defer r.Close()
- c.Check(err, Equals, nil)
- c.Check(n, Equals, int64(3))
- c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
-
- content, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
- c.Check(content, DeepEquals, []byte("foo"))
-
- c.Logf("%q", st.reqIDs)
- c.Assert(len(st.reqIDs) > 1, Equals, true)
- for _, reqid := range st.reqIDs {
- c.Check(reqid, Not(Equals), "")
- c.Check(reqid, Equals, st.reqIDs[0])
+ defer func(origDefault, origMinimum time.Duration) {
+ DefaultRetryDelay = origDefault
+ MinimumRetryDelay = origMinimum
+ }(DefaultRetryDelay, MinimumRetryDelay)
+ DefaultRetryDelay = time.Second / 8
+ MinimumRetryDelay = time.Millisecond
+
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
+
+ for _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {
+ c.Logf("=== initial delay %v", delay)
+
+ st := &FailThenSucceedHandler{
+ morefails: 2,
+ handled: make(chan string, 4),
+ successhandler: StubGetHandler{
+ c,
+ hash,
+ "abc123",
+ http.StatusOK,
+ []byte("foo")}}
+
+ ks := RunFakeKeepServer(st)
+ defer ks.listener.Close()
+
+ arv, err := arvadosclient.MakeArvadosClient()
+ c.Check(err, IsNil)
+ kc, _ := MakeKeepClient(arv)
+ arv.ApiToken = "abc123"
+ kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+ kc.Retries = 3
+ kc.RetryDelay = delay
+ kc.DiskCacheSize = DiskCacheDisabled
+
+ t0 := time.Now()
+ r, n, _, err := kc.Get(hash)
+ c.Assert(err, IsNil)
+ c.Check(n, Equals, int64(3))
+ elapsed := time.Since(t0)
+
+ nonsleeptime := time.Second / 10
+ expect := kc.RetryDelay
+ if expect == 0 {
+ expect = DefaultRetryDelay
+ }
+ min := MinimumRetryDelay * 3
+ max := expect + expect*2 + expect*2*2 + nonsleeptime
+ c.Check(elapsed >= min, Equals, true, Commentf("elapsed %v / expect min %v", elapsed, min))
+ c.Check(elapsed <= max, Equals, true, Commentf("elapsed %v / expect max %v", elapsed, max))
+
+ content, err := ioutil.ReadAll(r)
+ c.Check(err, IsNil)
+ c.Check(content, DeepEquals, []byte("foo"))
+ c.Check(r.Close(), IsNil)
+
+ c.Logf("%q", st.reqIDs)
+ if c.Check(st.reqIDs, Not(HasLen), 0) {
+ for _, reqid := range st.reqIDs {
+ c.Check(reqid, Not(Equals), "")
+ c.Check(reqid, Equals, st.reqIDs[0])
+ }
+ }
}
}
func (s *StandaloneSuite) TestGetNetError(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
arv, err := arvadosclient.MakeArvadosClient()
c.Check(err, IsNil)
@@ -842,19 +879,19 @@ func (s *StandaloneSuite) TestGetNetError(c *C) {
arv.ApiToken = "abc123"
kc.SetServiceRoots(map[string]string{"x": "http://localhost:62222"}, nil, nil)
- r, n, url2, err := kc.Get(hash)
+ r, n, _, err := kc.Get(hash)
errNotFound, _ := err.(*ErrNotFound)
- c.Check(errNotFound, NotNil)
- c.Check(strings.Contains(errNotFound.Error(), "connection refused"), Equals, true)
- c.Check(errNotFound.Temporary(), Equals, true)
+ if c.Check(errNotFound, NotNil) {
+ c.Check(strings.Contains(errNotFound.Error(), "connection refused"), Equals, true)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ }
c.Check(n, Equals, int64(0))
- c.Check(url2, Equals, "")
- c.Check(r, Equals, nil)
+ c.Check(r, IsNil)
}
func (s *StandaloneSuite) TestGetWithServiceHint(c *C) {
uuid := "zzzzz-bi6l4-123451234512345"
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
// This one shouldn't be used:
ks0 := RunFakeKeepServer(StubGetHandler{
@@ -882,22 +919,21 @@ func (s *StandaloneSuite) TestGetWithServiceHint(c *C) {
nil,
map[string]string{uuid: ks.url})
- r, n, uri, err := kc.Get(hash + "+K@" + uuid)
- defer r.Close()
- c.Check(err, Equals, nil)
+ r, n, _, err := kc.Get(hash + "+K@" + uuid)
+ c.Assert(err, IsNil)
c.Check(n, Equals, int64(3))
- c.Check(uri, Equals, fmt.Sprintf("%s/%s", ks.url, hash+"+K@"+uuid))
content, err := ioutil.ReadAll(r)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(content, DeepEquals, []byte("foo"))
+ c.Check(r.Close(), IsNil)
}
// Use a service hint to fetch from a local disk service, overriding
// rendezvous probe order.
func (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {
uuid := "zzzzz-bi6l4-zzzzzzzzzzzzzzz"
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
// This one shouldn't be used, although it appears first in
// rendezvous probe order:
@@ -905,8 +941,8 @@ func (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {
c,
"error if used",
"abc123",
- http.StatusOK,
- []byte("foo")})
+ http.StatusBadGateway,
+ nil})
defer ks0.listener.Close()
// This one should be used:
ks := RunFakeKeepServer(StubGetHandler{
@@ -935,20 +971,19 @@ func (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {
uuid: ks.url},
)
- r, n, uri, err := kc.Get(hash + "+K@" + uuid)
- defer r.Close()
- c.Check(err, Equals, nil)
+ r, n, _, err := kc.Get(hash + "+K@" + uuid)
+ c.Assert(err, IsNil)
c.Check(n, Equals, int64(3))
- c.Check(uri, Equals, fmt.Sprintf("%s/%s", ks.url, hash+"+K@"+uuid))
content, err := ioutil.ReadAll(r)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(content, DeepEquals, []byte("foo"))
+ c.Check(r.Close(), IsNil)
}
func (s *StandaloneSuite) TestGetWithServiceHintFailoverToLocals(c *C) {
uuid := "zzzzz-bi6l4-123451234512345"
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
ksLocal := RunFakeKeepServer(StubGetHandler{
c,
@@ -974,15 +1009,14 @@ func (s *StandaloneSuite) TestGetWithServiceHintFailoverToLocals(c *C) {
nil,
map[string]string{uuid: ksGateway.url})
- r, n, uri, err := kc.Get(hash + "+K@" + uuid)
- c.Assert(err, Equals, nil)
- defer r.Close()
+ r, n, _, err := kc.Get(hash + "+K@" + uuid)
+ c.Assert(err, IsNil)
c.Check(n, Equals, int64(3))
- c.Check(uri, Equals, fmt.Sprintf("%s/%s", ksLocal.url, hash+"+K@"+uuid))
content, err := ioutil.ReadAll(r)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(content, DeepEquals, []byte("foo"))
+ c.Check(r.Close(), IsNil)
}
type BarHandler struct {
@@ -995,8 +1029,8 @@ func (h BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
}
func (s *StandaloneSuite) TestChecksum(c *C) {
- foohash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
- barhash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+ foohash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
+ barhash := fmt.Sprintf("%x+3", md5.Sum([]byte("bar")))
st := BarHandler{make(chan string, 1)}
@@ -1010,25 +1044,36 @@ func (s *StandaloneSuite) TestChecksum(c *C) {
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
r, n, _, err := kc.Get(barhash)
- c.Check(err, IsNil)
- _, err = ioutil.ReadAll(r)
- c.Check(n, Equals, int64(3))
- c.Check(err, Equals, nil)
+ if c.Check(err, IsNil) {
+ _, err = ioutil.ReadAll(r)
+ c.Check(n, Equals, int64(3))
+ c.Check(err, IsNil)
+ }
- <-st.handled
+ select {
+ case <-st.handled:
+ case <-time.After(time.Second):
+ c.Fatal("timed out")
+ }
r, n, _, err = kc.Get(foohash)
- c.Check(err, IsNil)
- _, err = ioutil.ReadAll(r)
- c.Check(n, Equals, int64(3))
+ if err == nil {
+ buf, readerr := ioutil.ReadAll(r)
+ c.Logf("%q", buf)
+ err = readerr
+ }
c.Check(err, Equals, BadChecksum)
- <-st.handled
+ select {
+ case <-st.handled:
+ case <-time.After(time.Second):
+ c.Fatal("timed out")
+ }
}
func (s *StandaloneSuite) TestGetWithFailures(c *C) {
content := []byte("waz")
- hash := fmt.Sprintf("%x", md5.Sum(content))
+ hash := fmt.Sprintf("%x+3", md5.Sum(content))
fh := Error404Handler{
make(chan string, 4)}
@@ -1072,16 +1117,20 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
// an example that passes this Assert.)
c.Assert(NewRootSorter(localRoots, hash).GetSortedRoots()[0], Not(Equals), ks1[0].url)
- r, n, url2, err := kc.Get(hash)
+ r, n, _, err := kc.Get(hash)
- <-fh.handled
- c.Check(err, Equals, nil)
+ select {
+ case <-fh.handled:
+ case <-time.After(time.Second):
+ c.Fatal("timed out")
+ }
+ c.Assert(err, IsNil)
c.Check(n, Equals, int64(3))
- c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
readContent, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
+ c.Check(err2, IsNil)
c.Check(readContent, DeepEquals, content)
+ c.Check(r.Close(), IsNil)
}
func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
@@ -1090,9 +1139,9 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
arv, err := arvadosclient.MakeArvadosClient()
c.Check(err, IsNil)
kc, err := MakeKeepClient(arv)
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
- hash := fmt.Sprintf("%x", md5.Sum(content))
+ hash := fmt.Sprintf("%x+%d", md5.Sum(content), len(content))
{
n, _, err := kc.Ask(hash)
@@ -1101,29 +1150,32 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
}
{
hash2, replicas, err := kc.PutB(content)
- c.Check(hash2, Matches, fmt.Sprintf(`%s\+%d\b.*`, hash, len(content)))
+ c.Check(err, IsNil)
+ c.Check(hash2, Matches, `\Q`+hash+`\E\b.*`)
c.Check(replicas, Equals, 2)
- c.Check(err, Equals, nil)
}
{
- r, n, url2, err := kc.Get(hash)
- c.Check(err, Equals, nil)
+ r, n, _, err := kc.Get(hash)
+ c.Check(err, IsNil)
c.Check(n, Equals, int64(len(content)))
- c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
-
- readContent, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
- c.Check(readContent, DeepEquals, content)
+ if c.Check(r, NotNil) {
+ readContent, err := ioutil.ReadAll(r)
+ c.Check(err, IsNil)
+ if c.Check(len(readContent), Equals, len(content)) {
+ c.Check(readContent, DeepEquals, content)
+ }
+ c.Check(r.Close(), IsNil)
+ }
}
{
n, url2, err := kc.Ask(hash)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(n, Equals, int64(len(content)))
- c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
+ c.Check(url2, Matches, "http://localhost:\\d+/\\Q"+hash+"\\E")
}
{
loc, err := kc.LocalLocator(hash)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Assert(len(loc) >= 32, Equals, true)
c.Check(loc[:32], Equals, hash[:32])
}
@@ -1170,7 +1222,7 @@ func (s *StandaloneSuite) TestPutProxy(c *C) {
_, replicas, err := kc.PutB([]byte("foo"))
<-st.handled
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(replicas, Equals, 2)
}
@@ -1204,7 +1256,7 @@ func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
func (s *StandaloneSuite) TestMakeLocator(c *C) {
l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+3+Aabcde@12345678")
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
c.Check(l.Size, Equals, 3)
c.Check(l.Hints, DeepEquals, []string{"3", "Aabcde@12345678"})
@@ -1212,7 +1264,7 @@ func (s *StandaloneSuite) TestMakeLocator(c *C) {
func (s *StandaloneSuite) TestMakeLocatorNoHints(c *C) {
l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce")
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
c.Check(l.Size, Equals, -1)
c.Check(l.Hints, DeepEquals, []string{})
@@ -1220,7 +1272,7 @@ func (s *StandaloneSuite) TestMakeLocatorNoHints(c *C) {
func (s *StandaloneSuite) TestMakeLocatorNoSizeHint(c *C) {
l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+Aabcde@12345678")
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
c.Check(l.Size, Equals, -1)
c.Check(l.Hints, DeepEquals, []string{"Aabcde@12345678"})
@@ -1229,7 +1281,7 @@ func (s *StandaloneSuite) TestMakeLocatorNoSizeHint(c *C) {
func (s *StandaloneSuite) TestMakeLocatorPreservesUnrecognizedHints(c *C) {
str := "91f372a266fe2bf2823cb8ec7fda31ce+3+Unknown+Kzzzzz+Afoobar"
l, err := MakeLocator(str)
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
c.Check(l.Size, Equals, 3)
c.Check(l.Hints, DeepEquals, []string{"3", "Unknown", "Kzzzzz", "Afoobar"})
@@ -1335,14 +1387,14 @@ func (h StubGetIndexHandler) ServeHTTP(resp http.ResponseWriter, req *http.Reque
}
func (s *StandaloneSuite) TestGetIndexWithNoPrefix(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := StubGetIndexHandler{
c,
"/index",
"abc123",
http.StatusOK,
- []byte(hash + "+3 1443559274\n\n")}
+ []byte(hash + " 1443559274\n\n")}
ks := RunFakeKeepServer(st)
defer ks.listener.Close()
@@ -1358,19 +1410,19 @@ func (s *StandaloneSuite) TestGetIndexWithNoPrefix(c *C) {
c.Check(err, IsNil)
content, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
+ c.Check(err2, IsNil)
c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
}
func (s *StandaloneSuite) TestGetIndexWithPrefix(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := StubGetIndexHandler{
c,
"/index/" + hash[0:3],
"abc123",
http.StatusOK,
- []byte(hash + "+3 1443559274\n\n")}
+ []byte(hash + " 1443559274\n\n")}
ks := RunFakeKeepServer(st)
defer ks.listener.Close()
@@ -1382,15 +1434,15 @@ func (s *StandaloneSuite) TestGetIndexWithPrefix(c *C) {
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
r, err := kc.GetIndex("x", hash[0:3])
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
content, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
+ c.Check(err2, IsNil)
c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
}
func (s *StandaloneSuite) TestGetIndexIncomplete(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := StubGetIndexHandler{
c,
@@ -1413,7 +1465,7 @@ func (s *StandaloneSuite) TestGetIndexIncomplete(c *C) {
}
func (s *StandaloneSuite) TestGetIndexWithNoSuchServer(c *C) {
- hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
st := StubGetIndexHandler{
c,
@@ -1453,55 +1505,78 @@ func (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {
kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
r, err := kc.GetIndex("x", "abcd")
- c.Check(err, Equals, nil)
+ c.Check(err, IsNil)
content, err2 := ioutil.ReadAll(r)
- c.Check(err2, Equals, nil)
+ c.Check(err2, IsNil)
c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
}
func (s *StandaloneSuite) TestPutBRetry(c *C) {
- st := &FailThenSucceedHandler{
- handled: make(chan string, 1),
- successhandler: &StubPutHandler{
- c: c,
- expectPath: Md5String("foo"),
- expectAPIToken: "abc123",
- expectBody: "foo",
- expectStorageClass: "default",
- returnStorageClasses: "",
- handled: make(chan string, 5),
- },
- }
+ DefaultRetryDelay = time.Second / 8
+ MinimumRetryDelay = time.Millisecond
+
+ for _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {
+ c.Logf("=== initial delay %v", delay)
+
+ st := &FailThenSucceedHandler{
+ morefails: 5, // handler will fail 6x in total, 3 for each server
+ handled: make(chan string, 10),
+ successhandler: &StubPutHandler{
+ c: c,
+ expectPath: Md5String("foo"),
+ expectAPIToken: "abc123",
+ expectBody: "foo",
+ expectStorageClass: "default",
+ returnStorageClasses: "",
+ handled: make(chan string, 5),
+ },
+ }
- arv, _ := arvadosclient.MakeArvadosClient()
- kc, _ := MakeKeepClient(arv)
+ arv, _ := arvadosclient.MakeArvadosClient()
+ kc, _ := MakeKeepClient(arv)
+ kc.Retries = 3
+ kc.RetryDelay = delay
+ kc.DiskCacheSize = DiskCacheDisabled
+ kc.Want_replicas = 2
- kc.Want_replicas = 2
- arv.ApiToken = "abc123"
- localRoots := make(map[string]string)
- writableLocalRoots := make(map[string]string)
+ arv.ApiToken = "abc123"
+ localRoots := make(map[string]string)
+ writableLocalRoots := make(map[string]string)
- ks := RunSomeFakeKeepServers(st, 2)
+ ks := RunSomeFakeKeepServers(st, 2)
- for i, k := range ks {
- localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
- writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
- defer k.listener.Close()
- }
+ for i, k := range ks {
+ localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+ writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+ defer k.listener.Close()
+ }
- kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+ kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
- hash, replicas, err := kc.PutB([]byte("foo"))
+ t0 := time.Now()
+ hash, replicas, err := kc.PutB([]byte("foo"))
- c.Check(err, Equals, nil)
- c.Check(hash, Equals, "")
- c.Check(replicas, Equals, 2)
+ c.Check(err, IsNil)
+ c.Check(hash, Equals, "")
+ c.Check(replicas, Equals, 2)
+ elapsed := time.Since(t0)
+
+ nonsleeptime := time.Second / 10
+ expect := kc.RetryDelay
+ if expect == 0 {
+ expect = DefaultRetryDelay
+ }
+ min := MinimumRetryDelay * 3
+ max := expect + expect*2 + expect*2*2
+ max += nonsleeptime
+ checkInterval(c, elapsed, min, max)
+ }
}
func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
// Add an additional "testblobstore" keepservice
blobKeepService := make(arvadosclient.Dict)
@@ -1511,13 +1586,13 @@ func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
"service_port": "21321",
"service_type": "testblobstore"}},
&blobKeepService)
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
defer func() { arv.Delete("keep_services", blobKeepService["uuid"].(string), nil, nil) }()
RefreshServiceDiscovery()
// Make a keepclient and ensure that the testblobstore is included
kc, err := MakeKeepClient(arv)
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
// verify kc.LocalRoots
c.Check(len(kc.LocalRoots()), Equals, 3)
@@ -1544,3 +1619,60 @@ func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
c.Assert(kc.foundNonDiskSvc, Equals, true)
c.Assert(kc.httpClient().(*http.Client).Timeout, Equals, 300*time.Second)
}
+
+func (s *StandaloneSuite) TestDelayCalculator_Default(c *C) {
+ MinimumRetryDelay = time.Second / 2
+ DefaultRetryDelay = time.Second
+
+ dc := delayCalculator{InitialMaxDelay: 0}
+ checkInterval(c, dc.Next(), time.Second/2, time.Second)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*2)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*4)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*8)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*10)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*10)
+}
+
+func (s *StandaloneSuite) TestDelayCalculator_SetInitial(c *C) {
+ MinimumRetryDelay = time.Second / 2
+ DefaultRetryDelay = time.Second
+
+ dc := delayCalculator{InitialMaxDelay: time.Second * 2}
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*2)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*4)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*8)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*16)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+ checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+}
+
+func (s *StandaloneSuite) TestDelayCalculator_EnsureSomeLongDelays(c *C) {
+ dc := delayCalculator{InitialMaxDelay: time.Second * 5}
+ var d time.Duration
+ n := 4000
+ for i := 0; i < n; i++ {
+ if i < 20 || i%10 == 0 {
+ c.Logf("i=%d, delay=%v", i, d)
+ }
+ if d = dc.Next(); d > dc.InitialMaxDelay*9 {
+ return
+ }
+ }
+ c.Errorf("after %d trials, never got a delay more than 90%% of expected max %d; last was %v", n, dc.InitialMaxDelay*10, d)
+}
+
+// If InitialMaxDelay is less than MinimumRetryDelay/10, then delay is
+// always MinimumRetryDelay.
+func (s *StandaloneSuite) TestDelayCalculator_InitialLessThanMinimum(c *C) {
+ MinimumRetryDelay = time.Second / 2
+ dc := delayCalculator{InitialMaxDelay: time.Millisecond}
+ for i := 0; i < 20; i++ {
+ c.Check(dc.Next(), Equals, time.Second/2)
+ }
+}
+
+func checkInterval(c *C, t, min, max time.Duration) {
+ c.Check(t >= min, Equals, true, Commentf("got %v which is below expected min %v", t, min))
+ c.Check(t <= max, Equals, true, Commentf("got %v which is above expected max %v", t, max))
+}
diff --git a/sdk/go/keepclient/support.go b/sdk/go/keepclient/support.go
index 8d299815b2..d3d799dc5d 100644
--- a/sdk/go/keepclient/support.go
+++ b/sdk/go/keepclient/support.go
@@ -13,10 +13,12 @@ import (
"io"
"io/ioutil"
"log"
+ "math/rand"
"net/http"
"os"
"strconv"
"strings"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
@@ -127,7 +129,7 @@ func (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo [
}
}
-func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
var resp arvados.BlockWriteResponse
var getReader func() io.Reader
if req.Data == nil && req.Reader == nil {
@@ -149,8 +151,12 @@ func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOpti
getReader = func() io.Reader { return bytes.NewReader(req.Data[:req.DataSize]) }
} else {
buf := asyncbuf.NewBuffer(make([]byte, 0, req.DataSize))
+ reader := req.Reader
+ if req.Hash != "" {
+ reader = HashCheckingReader{req.Reader, md5.New(), req.Hash}
+ }
go func() {
- _, err := io.Copy(buf, HashCheckingReader{req.Reader, md5.New(), req.Hash})
+ _, err := io.Copy(buf, reader)
buf.CloseWithError(err)
}()
getReader = buf.NewReader
@@ -214,6 +220,7 @@ func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOpti
replicasPerThread = req.Replicas
}
+ delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
retriesRemaining := req.Attempts
var retryServers []string
@@ -302,14 +309,17 @@ func (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOpti
}
if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
- (status.statusCode >= 500 && status.statusCode != 503) {
+ (status.statusCode >= 500 && status.statusCode != http.StatusInsufficientStorage) {
// Timeout, too many requests, or other server side failure
- // Do not retry when status code is 503, which means the keep server is full
+ // (do not auto-retry status 507 "full")
retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
}
}
sv = retryServers
+ if len(sv) > 0 {
+ time.Sleep(delay.Next())
+ }
}
return resp, nil
@@ -341,3 +351,37 @@ func parseStorageClassesConfirmedHeader(hdr string) (map[string]int, error) {
}
return classesStored, nil
}
+
+// delayCalculator calculates a series of delays for implementing
+// exponential backoff with jitter. The first call to Next() returns
+// a random duration between MinimumRetryDelay and the specified
+// InitialMaxDelay (or DefaultRetryDelay if 0). The max delay is
+// doubled on each subsequent call to Next(), up to 10x the initial
+// max delay.
+type delayCalculator struct {
+ InitialMaxDelay time.Duration
+ n int // number of delays returned so far
+ nextmax time.Duration
+ limit time.Duration
+}
+
+func (dc *delayCalculator) Next() time.Duration {
+ if dc.nextmax <= MinimumRetryDelay {
+ // initialize
+ if dc.InitialMaxDelay > 0 {
+ dc.nextmax = dc.InitialMaxDelay
+ } else {
+ dc.nextmax = DefaultRetryDelay
+ }
+ dc.limit = 10 * dc.nextmax
+ }
+ d := time.Duration(rand.Float64() * float64(dc.nextmax))
+ if d < MinimumRetryDelay {
+ d = MinimumRetryDelay
+ }
+ dc.nextmax *= 2
+ if dc.nextmax > dc.limit {
+ dc.nextmax = dc.limit
+ }
+ return d
+}
diff --git a/sdk/go/manifest/manifest.go b/sdk/go/manifest/manifest.go
index 954fb710c0..a597003859 100644
--- a/sdk/go/manifest/manifest.go
+++ b/sdk/go/manifest/manifest.go
@@ -11,12 +11,13 @@ package manifest
import (
"errors"
"fmt"
- "git.arvados.org/arvados.git/sdk/go/blockdigest"
"path"
"regexp"
"sort"
"strconv"
"strings"
+
+ "git.arvados.org/arvados.git/sdk/go/blockdigest"
)
var ErrInvalidToken = errors.New("Invalid token")
@@ -467,21 +468,21 @@ func (m segmentedManifest) manifestTextForPath(srcpath, relocate string) string
// If 'srcpath' and 'relocate' are '.' it simply returns an equivalent manifest
// in normalized form.
//
-// Extract(".", ".") // return entire normalized manfest text
+//	Extract(".", ".") // return entire normalized manifest text
//
// If 'srcpath' points to a single file, it will return manifest text for just that file.
// The value of "relocate" is can be used to rename the file or set the file stream.
//
-// Extract("./foo", ".") // extract file "foo" and put it in stream "."
-// Extract("./foo", "./bar") // extract file "foo", rename it to "bar" in stream "."
-// Extract("./foo", "./bar/") // extract file "foo", rename it to "./bar/foo"
-// Extract("./foo", "./bar/baz") // extract file "foo", rename it to "./bar/baz")
+// Extract("./foo", ".") // extract file "foo" and put it in stream "."
+// Extract("./foo", "./bar") // extract file "foo", rename it to "bar" in stream "."
+// Extract("./foo", "./bar/") // extract file "foo", rename it to "./bar/foo"
+//	Extract("./foo", "./bar/baz") // extract file "foo", rename it to "./bar/baz"
//
// Otherwise it will return the manifest text for all streams with the prefix in "srcpath" and place
// them under the path in "relocate".
//
-// Extract("./stream", ".") // extract "./stream" to "." and "./stream/subdir" to "./subdir")
-// Extract("./stream", "./bar") // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir")
+//	Extract("./stream", ".")     // extract "./stream" to "." and "./stream/subdir" to "./subdir"
+//	Extract("./stream", "./bar") // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir"
func (m Manifest) Extract(srcpath, relocate string) (ret Manifest) {
segmented, err := m.segment()
if err != nil {
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
index ab03d34f19..4bd59a75d7 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
@@ -27,7 +27,7 @@ import java.util.Map;
public abstract class BaseStandardApiClient extends BaseApiClient {
- private static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
+ protected static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);
BaseStandardApiClient(ConfigProvider config) {
@@ -107,7 +107,7 @@ public abstract class BaseStandardApiClient
return MAPPER.readValue(content, cls);
}
- private String mapToJson(TL type) {
+ protected String mapToJson(TL type) {
ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();
try {
return writer.writeValueAsString(type);
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
index 141f02deba..581253f53c 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
@@ -9,12 +9,18 @@ package org.arvados.client.api.client;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;
+import okhttp3.HttpUrl;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+
public class CollectionsApiClient extends BaseStandardApiClient {
private static final String RESOURCE = "collections";
+
private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class);
public CollectionsApiClient(ConfigProvider config) {
@@ -28,6 +34,14 @@ public class CollectionsApiClient extends BaseStandardApiClient {
CountingFileRequestBody(final File file, final ProgressListener listener) {
- this.file = file;
- this.listener = listener;
+ super(file, listener);
}
@Override
public long contentLength() {
- return file.length();
- }
-
- @Override
- public MediaType contentType() {
- return CONTENT_BINARY;
+ return requestBodyData.length();
}
@Override
public void writeTo(BufferedSink sink) {
- try (Source source = Okio.source(file)) {
+ try (Source source = Okio.source(requestBodyData)) {
long total = 0;
long read;
@@ -61,24 +46,4 @@ public class CountingFileRequestBody extends RequestBody {
//ignore
}
}
-
- static class TransferData {
-
- private final Logger log = org.slf4j.LoggerFactory.getLogger(TransferData.class);
- private int progressValue;
- private long totalSize;
-
- TransferData(long totalSize) {
- this.progressValue = 0;
- this.totalSize = totalSize;
- }
-
- void updateTransferProgress(long transferred) {
- float progress = (transferred / (float) totalSize) * 100;
- if (progressValue != (int) progress) {
- progressValue = (int) progress;
- log.debug("{} / {} / {}%", transferred, totalSize, progressValue);
- }
- }
- }
}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingRequestBody.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingRequestBody.java
new file mode 100644
index 0000000000..397a1e2306
--- /dev/null
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingRequestBody.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.MediaType;
+import okhttp3.RequestBody;
+import org.slf4j.Logger;
+
+abstract class CountingRequestBody extends RequestBody {
+
+ protected static final int SEGMENT_SIZE = 2048; // okio.Segment.SIZE
+ protected static final MediaType CONTENT_BINARY = MediaType.parse(com.google.common.net.MediaType.OCTET_STREAM.toString());
+
+ protected final ProgressListener listener;
+
+ protected final T requestBodyData;
+
+ CountingRequestBody(T file, final ProgressListener listener) {
+ this.requestBodyData = file;
+ this.listener = listener;
+ }
+
+ @Override
+ public MediaType contentType() {
+ return CONTENT_BINARY;
+ }
+
+ static class TransferData {
+
+ private final Logger log = org.slf4j.LoggerFactory.getLogger(TransferData.class);
+ private int progressValue;
+ private long totalSize;
+
+ TransferData(long totalSize) {
+ this.progressValue = 0;
+ this.totalSize = totalSize;
+ }
+
+ void updateTransferProgress(long transferred) {
+ float progress = (transferred / (float) totalSize) * 100;
+ if (progressValue != (int) progress) {
+ progressValue = (int) progress;
+ log.debug("{} / {} / {}%", transferred, totalSize, progressValue);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingStreamRequestBody.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingStreamRequestBody.java
new file mode 100644
index 0000000000..7c39371697
--- /dev/null
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingStreamRequestBody.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okio.BufferedSink;
+import okio.Okio;
+import okio.Source;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+
+public class CountingStreamRequestBody extends CountingRequestBody {
+
+ CountingStreamRequestBody(final InputStream inputStream, final ProgressListener listener) {
+ super(inputStream, listener);
+ }
+
+ @Override
+ public long contentLength() throws IOException {
+ return requestBodyData.available();
+ }
+
+ @Override
+ public void writeTo(BufferedSink sink) {
+ try (Source source = Okio.source(requestBodyData)) {
+ long total = 0;
+ long read;
+
+ while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {
+ total += read;
+ sink.flush();
+ listener.updateProgress(total);
+
+ }
+ } catch (RuntimeException rethrown) {
+ throw rethrown;
+ } catch (Exception ignored) {
+ //ignore
+ }
+ }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java
index a9306ca2ec..c1525e07a7 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java
@@ -9,7 +9,7 @@ package org.arvados.client.api.client;
import okhttp3.Request;
import okhttp3.RequestBody;
-import org.arvados.client.api.client.CountingFileRequestBody.TransferData;
+import org.arvados.client.api.client.CountingRequestBody.TransferData;
import org.arvados.client.common.Headers;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
index 05d39e9e60..ad37dad2bb 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
@@ -10,9 +10,14 @@ package org.arvados.client.api.client;
import okhttp3.HttpUrl;
import okhttp3.Request;
import okhttp3.RequestBody;
+import okhttp3.Response;
+import okhttp3.ResponseBody;
+
import org.arvados.client.config.ConfigProvider;
import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
public class KeepWebApiClient extends BaseApiClient {
@@ -29,6 +34,27 @@ public class KeepWebApiClient extends BaseApiClient {
return newFileCall(request);
}
+ public InputStream get(String collectionUuid, String filePathName, long start, Long end) throws IOException {
+ Request.Builder builder = this.getRequestBuilder();
+ String rangeValue = "bytes=" + start + "-";
+ if (end != null) {
+ rangeValue += end;
+ }
+ builder.addHeader("Range", rangeValue);
+ Request request = builder.url(this.getUrlBuilder(collectionUuid, filePathName).build()).get().build();
+ Response response = client.newCall(request).execute();
+ if (!response.isSuccessful()) {
+ response.close();
+ throw new IOException("Failed to download file: " + response);
+ }
+ ResponseBody body = response.body();
+ if (body == null) {
+ response.close();
+ throw new IOException("Response body is null for request: " + request);
+ }
+ return body.byteStream();
+ }
+
public String delete(String collectionUuid, String filePathName) {
Request request = getRequestBuilder()
.url(getUrlBuilder(collectionUuid, filePathName).build())
@@ -48,6 +74,16 @@ public class KeepWebApiClient extends BaseApiClient {
return newCall(request);
}
+ public String upload(String collectionUuid, InputStream inputStream, String fileName, ProgressListener progressListener) {
+ RequestBody requestBody = new CountingStreamRequestBody(inputStream, progressListener);
+
+ Request request = getRequestBuilder()
+ .url(getUrlBuilder(collectionUuid, fileName).build())
+ .put(requestBody)
+ .build();
+ return newCall(request);
+ }
+
private HttpUrl.Builder getUrlBuilder(String collectionUuid, String filePathName) {
return new HttpUrl.Builder()
.scheme(config.getApiProtocol())
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
new file mode 100644
index 0000000000..2ef19cee79
--- /dev/null
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class CollectionReplaceFiles {
+
+ @JsonProperty("collection")
+ private CollectionOptions collectionOptions;
+
+ @JsonProperty("replace_files")
+ private Map replaceFiles;
+
+ public CollectionReplaceFiles() {
+ this.collectionOptions = new CollectionOptions();
+ this.replaceFiles = new HashMap<>();
+ }
+
+ public void addFileReplacement(String targetPath, String sourcePath) {
+ this.replaceFiles.put(targetPath, sourcePath);
+ }
+
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ public static class CollectionOptions {
+ @JsonProperty("preserve_version")
+ private boolean preserveVersion;
+
+ public CollectionOptions() {
+ this.preserveVersion = true;
+ }
+
+ public boolean isPreserveVersion() {
+ return preserveVersion;
+ }
+
+ public void setPreserveVersion(boolean preserveVersion) {
+ this.preserveVersion = preserveVersion;
+ }
+ }
+
+ public CollectionOptions getCollectionOptions() {
+ return collectionOptions;
+ }
+
+ public void setCollectionOptions(CollectionOptions collectionOptions) {
+ this.collectionOptions = collectionOptions;
+ }
+
+ public Map getReplaceFiles() {
+ return replaceFiles;
+ }
+
+ public void setReplaceFiles(Map replaceFiles) {
+ this.replaceFiles = replaceFiles;
+ }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java
index ca86c585e8..9230973698 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java
@@ -14,7 +14,7 @@ import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import java.util.List;
@JsonInclude(JsonInclude.Include.NON_NULL)
-@JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count", "exclude_home_project" })
+@JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count", "exclude_home_project", "include_old_versions", "include_trash" })
public class ListArgument extends Argument {
@JsonProperty("limit")
@@ -41,7 +41,17 @@ public class ListArgument extends Argument {
@JsonProperty("exclude_home_project")
private Boolean excludeHomeProject;
- ListArgument(Integer limit, Integer offset, List filters, List order, List select, Boolean distinct, Count count, Boolean excludeHomeProject) {
+ @JsonProperty("include_old_versions")
+ private Boolean includeOldVersions;
+
+ @JsonProperty("include_trash")
+ private Boolean includeTrash;
+
+ ListArgument(
+ Integer limit, Integer offset, List filters, List order, List select,
+ Boolean distinct, Count count, Boolean excludeHomeProject, Boolean includeOldVersions,
+ Boolean includeTrash
+ ) {
this.limit = limit;
this.offset = offset;
this.filters = filters;
@@ -50,6 +60,8 @@ public class ListArgument extends Argument {
this.distinct = distinct;
this.count = count;
this.excludeHomeProject = excludeHomeProject;
+ this.includeOldVersions = includeOldVersions;
+ this.includeTrash = includeTrash;
}
public static ListArgumentBuilder builder() {
@@ -74,6 +86,8 @@ public class ListArgument extends Argument {
private Boolean distinct;
private Count count;
private Boolean excludeHomeProject;
+ private Boolean includeOldVersions;
+ private Boolean includeTrash;
ListArgumentBuilder() {
}
@@ -118,8 +132,18 @@ public class ListArgument extends Argument {
return this;
}
+ public ListArgument.ListArgumentBuilder includeOldVersions(Boolean includeOldVersions) {
+ this.includeOldVersions = includeOldVersions;
+ return this;
+ }
+
+ public ListArgument.ListArgumentBuilder includeTrash(Boolean includeTrash) {
+ this.includeTrash = includeTrash;
+ return this;
+ }
+
public ListArgument build() {
- return new ListArgument(limit, offset, filters, order, select, distinct, count, excludeHomeProject);
+ return new ListArgument(limit, offset, filters, order, select, distinct, count, excludeHomeProject, includeOldVersions, includeTrash);
}
public String toString() {
@@ -127,7 +151,10 @@ public class ListArgument extends Argument {
", offset=" + this.offset + ", filters=" + this.filters +
", order=" + this.order + ", select=" + this.select +
", distinct=" + this.distinct + ", count=" + this.count +
- ", excludeHomeProject=" + this.excludeHomeProject + ")";
+ ", excludeHomeProject=" + this.excludeHomeProject +
+ ", includeOldVersions=" + this.includeOldVersions +
+ ", includeTrash=" + this.includeTrash +
+ ")";
}
}
}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
index d592b23ac3..e3d706ed0c 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
@@ -11,6 +11,10 @@ import java.io.File;
public class ExternalConfigProvider implements ConfigProvider {
+ private static final int DEFAULT_CONNECTION_TIMEOUT = 60000;
+ private static final int DEFAULT_READ_TIMEOUT = 60000;
+ private static final int DEFAULT_WRITE_TIMEOUT = 60000;
+
private boolean apiHostInsecure;
private String keepWebHost;
private int keepWebPort;
@@ -41,9 +45,9 @@ public class ExternalConfigProvider implements ConfigProvider {
this.fileSplitDirectory = fileSplitDirectory;
this.numberOfCopies = numberOfCopies;
this.numberOfRetries = numberOfRetries;
- this.connectTimeout = 60000;
- this.readTimeout = 60000;
- this.writeTimeout = 60000;
+ this.connectTimeout = DEFAULT_CONNECTION_TIMEOUT;
+ this.readTimeout = DEFAULT_READ_TIMEOUT;
+ this.writeTimeout = DEFAULT_WRITE_TIMEOUT;
}
ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort,
@@ -156,6 +160,9 @@ public class ExternalConfigProvider implements ConfigProvider {
private File fileSplitDirectory;
private int numberOfCopies;
private int numberOfRetries;
+ private int connectTimeout = DEFAULT_CONNECTION_TIMEOUT;
+ private int readTimeout = DEFAULT_READ_TIMEOUT;
+ private int writeTimeout = DEFAULT_WRITE_TIMEOUT;
ExternalConfigProviderBuilder() {
}
@@ -215,8 +222,23 @@ public class ExternalConfigProvider implements ConfigProvider {
return this;
}
+ public ExternalConfigProvider.ExternalConfigProviderBuilder connectTimeout(int connectTimeout) {
+ this.connectTimeout = connectTimeout;
+ return this;
+ }
+
+ public ExternalConfigProvider.ExternalConfigProviderBuilder readTimeout(int readTimeout) {
+ this.readTimeout = readTimeout;
+ return this;
+ }
+
+ public ExternalConfigProvider.ExternalConfigProviderBuilder writeTimeout(int writeTimeout) {
+ this.writeTimeout = writeTimeout;
+ return this;
+ }
+
public ExternalConfigProvider build() {
- return new ExternalConfigProvider(apiHostInsecure, keepWebHost, keepWebPort, apiHost, apiPort, apiToken, apiProtocol, fileSplitSize, fileSplitDirectory, numberOfCopies, numberOfRetries);
+ return new ExternalConfigProvider(apiHostInsecure, keepWebHost, keepWebPort, apiHost, apiPort, apiToken, apiProtocol, fileSplitSize, fileSplitDirectory, numberOfCopies, numberOfRetries, connectTimeout, readTimeout, writeTimeout);
}
}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
index 571cb25909..8b65cebc59 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
@@ -28,6 +28,7 @@ import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
public class ArvadosFacade {
@@ -201,6 +202,21 @@ public class ArvadosFacade {
return collectionsApiClient.create(collection);
}
+ /**
+ * Uploads multiple files to an existing collection.
+ *
+ * @param collectionUUID UUID of collection to which the files are to be copied
+ * @param files map of files to be copied to the existing collection.
+ *              Each entry maps a target path in the collection to a
+ *              source path (a portable data hash followed by the file path)
+ * @return collection object mapped from the JSON returned by the server after a successful copy
+ */
+ public Collection updateWithReplaceFiles(String collectionUUID, Map files) {
+ CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+ replaceFilesRequest.getReplaceFiles().putAll(files);
+ return collectionsApiClient.update(collectionUUID, replaceFilesRequest);
+ }
+
/**
* Returns current user information based on Api Token provided via configuration
*
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
index c1e8849e39..5bfcabc109 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
@@ -23,6 +23,8 @@ import org.slf4j.Logger;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
@@ -70,6 +72,37 @@ public class FileDownloader {
return downloadedFile;
}
+ public File downloadFileWithResume(String collectionUuid, String fileName, String pathToDownloadFolder, long start, Long end) throws IOException {
+ if (end != null && end < start) {
+ throw new IllegalArgumentException("End index must be greater than or equal to the start index");
+ }
+
+ File destinationFile = new File(pathToDownloadFolder, fileName);
+
+ if (!destinationFile.exists()) {
+ boolean isCreated = destinationFile.createNewFile();
+ if (!isCreated) {
+ throw new IOException("Failed to create new file: " + destinationFile.getAbsolutePath());
+ }
+ }
+
+ try (RandomAccessFile outputFile = new RandomAccessFile(destinationFile, "rw");
+ InputStream inputStream = keepWebApiClient.get(collectionUuid, fileName, start, end)) {
+ outputFile.seek(start);
+
+ long remaining = (end == null) ? Long.MAX_VALUE : end - start + 1;
+ byte[] buffer = new byte[4096];
+ int bytesRead;
+ while ((bytesRead = inputStream.read(buffer)) != -1 && remaining > 0) {
+ int bytesToWrite = (int) Math.min(bytesRead, remaining);
+ outputFile.write(buffer, 0, bytesToWrite);
+ remaining -= bytesToWrite;
+ }
+ }
+
+ return destinationFile;
+ }
+
public List downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) {
String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath();
List fileTokens = listFileInfoFromCollection(collectionUuid);
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
index 8da3bfbf51..94a79041a0 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
@@ -7,21 +7,39 @@
package org.arvados.client.api.client;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.test.utils.RequestMethod;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Before;
import org.junit.Test;
import static org.arvados.client.test.utils.ApiClientTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
private static final String RESOURCE = "collections";
-
- private CollectionsApiClient client = new CollectionsApiClient(CONFIG);
+ private static final String TEST_COLLECTION_NAME = "Super Collection";
+ private static final String TEST_COLLECTION_UUID = "test-collection-uuid";
+ private ObjectMapper objectMapper;
+ private CollectionsApiClient client;
+
+ @Before
+ public void setUp() {
+ objectMapper = new ObjectMapper();
+ objectMapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
+ client = new CollectionsApiClient(CONFIG);
+ }
@Test
public void listCollections() throws Exception {
@@ -66,7 +84,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
// given
server.enqueue(getResponse("collections-create-simple"));
- String name = "Super Collection";
+ String name = TEST_COLLECTION_NAME;
Collection collection = new Collection();
collection.setName(name);
@@ -90,7 +108,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
// given
server.enqueue(getResponse("collections-create-manifest"));
- String name = "Super Collection";
+ String name = TEST_COLLECTION_NAME;
String manifestText = ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n";
Collection collection = new Collection();
@@ -109,4 +127,45 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
assertThat(actual.getManifestText()).isEqualTo(manifestText);
}
+
+ @Test
+ public void testUpdateWithReplaceFiles() throws IOException, InterruptedException {
+ // given
+ server.enqueue(getResponse("collections-create-manifest"));
+
+ Map files = new HashMap<>();
+ files.put("targetPath1", "sourcePath1");
+ files.put("targetPath2", "sourcePath2");
+
+ CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+ replaceFilesRequest.setReplaceFiles(files);
+
+ // when
+ Collection actual = client.update(TEST_COLLECTION_UUID, replaceFilesRequest);
+
+ // then
+ RecordedRequest request = server.takeRequest();
+ assertAuthorizationHeader(request);
+ assertRequestPath(request, "collections/test-collection-uuid");
+ assertRequestMethod(request, RequestMethod.PUT);
+ assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
+
+ String actualRequestBody = request.getBody().readUtf8();
+ Map actualRequestMap = objectMapper.readValue(actualRequestBody, Map.class);
+
+ Map expectedRequestMap = new HashMap<>();
+ Map collectionOptionsMap = new HashMap<>();
+ collectionOptionsMap.put("preserve_version", true);
+
+ Map replaceFilesMap = new HashMap<>();
+ replaceFilesMap.put("targetPath1", "sourcePath1");
+ replaceFilesMap.put("targetPath2", "sourcePath2");
+
+ expectedRequestMap.put("collection", collectionOptionsMap);
+ expectedRequestMap.put("replace_files", replaceFilesMap);
+
+ String expectedJson = objectMapper.writeValueAsString(expectedRequestMap);
+ String actualJson = objectMapper.writeValueAsString(actualRequestMap);
+ assertEquals(expectedJson, actualJson);
+ }
}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
index 07b7b25339..9b6b4fa17f 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
@@ -10,15 +10,23 @@ package org.arvados.client.api.client;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Test;
+import java.io.ByteArrayOutputStream;
import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
import java.nio.file.Files;
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+
import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNotNull;
public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
- private KeepWebApiClient client = new KeepWebApiClient(CONFIG);
+ private final KeepWebApiClient client = new KeepWebApiClient(CONFIG);
@Test
public void uploadFile() throws Exception {
@@ -36,4 +44,38 @@ public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
assertThat(uploadResponse).isEqualTo("Created");
}
+ @Test
+ public void downloadPartialIsPerformedSuccessfully() throws Exception {
+ // given
+ String collectionUuid = "some-collection-uuid";
+ String filePathName = "sample-file-path";
+ long start = 1024;
+ Long end = null;
+
+ byte[] expectedData = "test data".getBytes();
+
+ try (Buffer buffer = new Buffer().write(expectedData)) {
+ server.enqueue(new MockResponse().setBody(buffer));
+
+ // when
+ InputStream inputStream = client.get(collectionUuid, filePathName, start, end);
+ byte[] actualData = inputStreamToByteArray(inputStream);
+
+ // then
+ assertNotNull(actualData);
+ assertArrayEquals(expectedData, actualData);
+ }
+ }
+
+ private byte[] inputStreamToByteArray(InputStream inputStream) throws IOException {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ int nRead;
+ byte[] data = new byte[1024];
+ while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
+ buffer.write(data, 0, nRead);
+ }
+ buffer.flush();
+ return buffer.toByteArray();
+ }
+
}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java b/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java
index 07269f7e7d..05ba8d1b09 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java
@@ -223,6 +223,9 @@ public class ArvadosFacadeIntegrationTest extends ArvadosClientIntegrationTest {
.fileSplitDirectory(CONFIG.getFileSplitDirectory())
.numberOfCopies(CONFIG.getNumberOfCopies())
.numberOfRetries(CONFIG.getNumberOfRetries())
+ .connectTimeout(CONFIG.getConnectTimeout())
+ .readTimeout(CONFIG.getReadTimeout())
+ .writeTimeout(CONFIG.getWriteTimeout())
.build();
}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
index 0fb1f0206c..741f80f7c9 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
@@ -19,7 +19,6 @@ import org.arvados.client.test.utils.FileTestUtils;
import org.arvados.client.utils.FileMerge;
import org.apache.commons.io.FileUtils;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -27,8 +26,11 @@ import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
+import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -36,6 +38,10 @@ import java.util.UUID;
import static org.arvados.client.test.utils.FileTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
@@ -80,17 +86,17 @@ public class FileDownloaderTest {
List downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
//then
- Assert.assertEquals(3, downloadedFiles.size()); // 3 files downloaded
+ assertEquals(3, downloadedFiles.size()); // 3 files downloaded
File collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid());
- Assert.assertTrue(collectionDir.exists()); // collection directory created
+ assertTrue(collectionDir.exists()); // collection directory created
// 3 files correctly saved
assertThat(downloadedFiles).allMatch(File::exists);
for(int i = 0; i < downloadedFiles.size(); i ++) {
File downloaded = new File(collectionDir + Characters.SLASH + files.get(i).getName());
- Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
+ assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
}
}
@@ -108,9 +114,32 @@ public class FileDownloaderTest {
File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
//then
- Assert.assertTrue(downloadedFile.exists());
- Assert.assertEquals(file.getName(), downloadedFile.getName());
- Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+ assertTrue(downloadedFile.exists());
+ assertEquals(file.getName(), downloadedFile.getName());
+ assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+ }
+
+ @Test
+ public void testDownloadFileWithResume() throws Exception {
+ //given
+ String collectionUuid = "some-collection-uuid";
+ String expectedDataString = "testData";
+ String fileName = "sample-file-name";
+ long start = 0;
+ Long end = null;
+
+ InputStream inputStream = new ByteArrayInputStream(expectedDataString.getBytes());
+
+ when(keepWebApiClient.get(collectionUuid, fileName, start, end)).thenReturn(inputStream);
+
+ //when
+ File downloadedFile = fileDownloader.downloadFileWithResume(collectionUuid, fileName, FILE_DOWNLOAD_TEST_DIR, start, end);
+
+ //then
+ assertNotNull(downloadedFile);
+ assertTrue(downloadedFile.exists());
+ String actualDataString = Files.readString(downloadedFile.toPath());
+ assertEquals("The content of the file does not match the expected data.", expectedDataString, actualDataString);
}
@After
diff --git a/sdk/perl/.gitignore b/sdk/perl/.gitignore
deleted file mode 100644
index 7c32f55981..0000000000
--- a/sdk/perl/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-install
diff --git a/sdk/perl/Makefile.PL b/sdk/perl/Makefile.PL
deleted file mode 100644
index ec903f36ed..0000000000
--- a/sdk/perl/Makefile.PL
+++ /dev/null
@@ -1,18 +0,0 @@
-#! /usr/bin/perl
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-use strict;
-
-use ExtUtils::MakeMaker;
-
-WriteMakefile(
- NAME => 'Arvados',
- VERSION_FROM => 'lib/Arvados.pm',
- PREREQ_PM => {
- 'JSON' => 0,
- 'LWP' => 0,
- 'Net::SSL' => 0,
- },
-);
diff --git a/sdk/perl/lib/Arvados.pm b/sdk/perl/lib/Arvados.pm
deleted file mode 100644
index 9eb04b4ab2..0000000000
--- a/sdk/perl/lib/Arvados.pm
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-=head1 NAME
-
-Arvados -- client library for Arvados services
-
-=head1 SYNOPSIS
-
- use Arvados;
- $arv = Arvados->new(apiHost => 'arvados.local');
-
- my $instances = $arv->{'pipeline_instances'}->{'list'}->execute();
- print "UUID is ", $instances->{'items'}->[0]->{'uuid'}, "\n";
-
- $uuid = 'eiv0u-arx5y-2c5ovx43zw90gvh';
- $instance = $arv->{'pipeline_instances'}->{'get'}->execute('uuid' => $uuid);
- print "ETag is ", $instance->{'etag'}, "\n";
-
- $instance->{'active'} = 1;
- $instance->{'name'} = '';
- $instance->save();
- print "ETag is ", $instance->{'etag'}, "\n";
-
-=head1 METHODS
-
-=head2 new()
-
- my $whc = Arvados->new( %OPTIONS );
-
-Set up a client and retrieve the schema from the server.
-
-=head3 Options
-
-=over
-
-=item apiHost
-
-Hostname of API discovery service. Default: C
-environment variable, or C
-
-=item apiProtocolScheme
-
-Protocol scheme. Default: C environment
-variable, or C
-
-=item authToken
-
-Authorization token. Default: C environment variable
-
-=item apiService
-
-Default C
-
-=item apiVersion
-
-Default C
-
-=back
-
-=cut
-
-package Arvados;
-
-use Net::SSL (); # From Crypt-SSLeay
-BEGIN {
- $Net::HTTPS::SSL_SOCKET_CLASS = "Net::SSL"; # Force use of Net::SSL
-}
-
-use JSON;
-use Carp;
-use Arvados::ResourceAccessor;
-use Arvados::ResourceMethod;
-use Arvados::ResourceProxy;
-use Arvados::ResourceProxyList;
-use Arvados::Request;
-use Data::Dumper;
-
-$Arvados::VERSION = 0.1;
-
-sub new
-{
- my $class = shift;
- my %self = @_;
- my $self = \%self;
- bless ($self, $class);
- return $self->build(@_);
-}
-
-sub build
-{
- my $self = shift;
-
- $config = load_config_file("$ENV{HOME}/.config/arvados/settings.conf");
-
- $self->{'authToken'} ||=
- $ENV{ARVADOS_API_TOKEN} || $config->{ARVADOS_API_TOKEN};
-
- $self->{'apiHost'} ||=
- $ENV{ARVADOS_API_HOST} || $config->{ARVADOS_API_HOST};
-
- $self->{'noVerifyHostname'} ||=
- $ENV{ARVADOS_API_HOST_INSECURE};
-
- $self->{'apiProtocolScheme'} ||=
- $ENV{ARVADOS_API_PROTOCOL_SCHEME} ||
- $config->{ARVADOS_API_PROTOCOL_SCHEME};
-
- $self->{'ua'} = new Arvados::Request;
-
- my $host = $self->{'apiHost'} || 'arvados';
- my $service = $self->{'apiService'} || 'arvados';
- my $version = $self->{'apiVersion'} || 'v1';
- my $scheme = $self->{'apiProtocolScheme'} || 'https';
- my $uri = "$scheme://$host/discovery/v1/apis/$service/$version/rest";
- my $r = $self->new_request;
- $r->set_uri($uri);
- $r->set_method("GET");
- $r->process_request();
- my $data, $headers;
- my ($status_number, $status_phrase) = $r->get_status();
- $data = $r->get_body() if $status_number == 200;
- $headers = $r->get_headers();
- if ($data) {
- my $doc = $self->{'discoveryDocument'} = JSON::decode_json($data);
- print STDERR Dumper $doc if $ENV{'DEBUG_ARVADOS_API_DISCOVERY'};
- my $k, $v;
- while (($k, $v) = each %{$doc->{'resources'}}) {
- $self->{$k} = Arvados::ResourceAccessor->new($self, $k);
- }
- } else {
- croak "No discovery doc at $uri - $status_number $status_phrase";
- }
- $self;
-}
-
-sub new_request
-{
- my $self = shift;
- local $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'};
- if ($self->{'noVerifyHostname'} || ($host =~ /\.local$/)) {
- $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'} = 0;
- }
- Arvados::Request->new();
-}
-
-sub load_config_file ($)
-{
- my $config_file = shift;
- my %config;
-
- if (open (CONF, $config_file)) {
- while () {
- next if /^\s*#/ || /^\s*$/; # skip comments and blank lines
- chomp;
- my ($key, $val) = split /\s*=\s*/, $_, 2;
- $config{$key} = $val;
- }
- }
- close CONF;
- return \%config;
-}
-
-1;
diff --git a/sdk/perl/lib/Arvados/Request.pm b/sdk/perl/lib/Arvados/Request.pm
deleted file mode 100644
index 4523f7d6b3..0000000000
--- a/sdk/perl/lib/Arvados/Request.pm
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-package Arvados::Request;
-use Data::Dumper;
-use LWP::UserAgent;
-use URI::Escape;
-use Encode;
-use strict;
-@Arvados::HTTP::ISA = qw(LWP::UserAgent);
-
-sub new
-{
- my $class = shift;
- my $self = {};
- bless ($self, $class);
- return $self->_init(@_);
-}
-
-sub _init
-{
- my $self = shift;
- $self->{'ua'} = new LWP::UserAgent(@_);
- $self->{'ua'}->agent ("libarvados-perl/".$Arvados::VERSION);
- $self;
-}
-
-sub set_uri
-{
- my $self = shift;
- $self->{'uri'} = shift;
-}
-
-sub process_request
-{
- my $self = shift;
- my %req;
- my %content;
- my $method = $self->{'method'};
- if ($method eq 'GET' || $method eq 'HEAD') {
- $content{'_method'} = $method;
- $method = 'POST';
- }
- $req{$method} = $self->{'uri'};
- $self->{'req'} = new HTTP::Request (%req);
- $self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
- $self->{'req'}->header('Accept' => 'application/json');
-
- # allow_nonref lets us encode JSON::true and JSON::false, see #12078
- my $json = JSON->new->allow_nonref;
- my ($p, $v);
- while (($p, $v) = each %{$self->{'queryParams'}}) {
- $content{$p} = (ref($v) eq "") ? $v : $json->encode($v);
- }
- my $content;
- while (($p, $v) = each %content) {
- $content .= '&' unless $content eq '';
- $content .= uri_escape($p);
- $content .= '=';
- $content .= uri_escape($v);
- }
- $self->{'req'}->content_type("application/x-www-form-urlencoded; charset='utf8'");
- $self->{'req'}->content(Encode::encode('utf8', $content));
- $self->{'res'} = $self->{'ua'}->request ($self->{'req'});
-}
-
-sub get_status
-{
- my $self = shift;
- return ($self->{'res'}->code(),
- $self->{'res'}->message());
-}
-
-sub get_body
-{
- my $self = shift;
- return $self->{'res'}->content;
-}
-
-sub set_method
-{
- my $self = shift;
- $self->{'method'} = shift;
-}
-
-sub set_query_params
-{
- my $self = shift;
- $self->{'queryParams'} = shift;
-}
-
-sub set_auth_token
-{
- my $self = shift;
- $self->{'authToken'} = shift;
-}
-
-sub get_headers
-{
- ""
-}
-
-1;
diff --git a/sdk/perl/lib/Arvados/ResourceAccessor.pm b/sdk/perl/lib/Arvados/ResourceAccessor.pm
deleted file mode 100644
index 8b235fc863..0000000000
--- a/sdk/perl/lib/Arvados/ResourceAccessor.pm
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-package Arvados::ResourceAccessor;
-use Carp;
-use Data::Dumper;
-
-sub new
-{
- my $class = shift;
- my $self = {};
- bless ($self, $class);
-
- $self->{'api'} = shift;
- $self->{'resourcesName'} = shift;
- $self->{'methods'} = $self->{'api'}->{'discoveryDocument'}->{'resources'}->{$self->{'resourcesName'}}->{'methods'};
- my $method_name, $method;
- while (($method_name, $method) = each %{$self->{'methods'}}) {
- $self->{$method_name} = Arvados::ResourceMethod->new($self, $method);
- }
- $self;
-}
-
-1;
diff --git a/sdk/perl/lib/Arvados/ResourceMethod.pm b/sdk/perl/lib/Arvados/ResourceMethod.pm
deleted file mode 100644
index d7e86ffdd8..0000000000
--- a/sdk/perl/lib/Arvados/ResourceMethod.pm
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-package Arvados::ResourceMethod;
-use Carp;
-use Data::Dumper;
-
-sub new
-{
- my $class = shift;
- my $self = {};
- bless ($self, $class);
- return $self->_init(@_);
-}
-
-sub _init
-{
- my $self = shift;
- $self->{'resourceAccessor'} = shift;
- $self->{'method'} = shift;
- return $self;
-}
-
-sub execute
-{
- my $self = shift;
- my $method = $self->{'method'};
-
- my $path = $method->{'path'};
-
- my %body_params;
- my %given_params = @_;
- my %extra_params = %given_params;
- my %method_params = %{$method->{'parameters'}};
- if ($method->{'request'}->{'properties'}) {
- while (my ($prop_name, $prop_value) =
- each %{$method->{'request'}->{'properties'}}) {
- if (ref($prop_value) eq 'HASH' && $prop_value->{'$ref'}) {
- $method_params{$prop_name} = { 'type' => 'object' };
- }
- }
- }
- while (my ($param_name, $param) = each %method_params) {
- delete $extra_params{$param_name};
- if ($param->{'required'} && !exists $given_params{$param_name}) {
- croak("Required parameter not supplied: $param_name");
- }
- elsif ($param->{'location'} eq 'path') {
- $path =~ s/{\Q$param_name\E}/$given_params{$param_name}/eg;
- }
- elsif (!exists $given_params{$param_name}) {
- ;
- }
- elsif ($param->{'type'} eq 'object') {
- my %param_value;
- my ($p, $v);
- if (exists $param->{'properties'}) {
- while (my ($property_name, $property) =
- each %{$param->{'properties'}}) {
- # if the discovery doc specifies object structure,
- # convert to true/false depending on supplied type
- if (!exists $given_params{$param_name}->{$property_name}) {
- ;
- }
- elsif (!defined $given_params{$param_name}->{$property_name}) {
- $param_value{$property_name} = JSON::null;
- }
- elsif ($property->{'type'} eq 'boolean') {
- $param_value{$property_name} = $given_params{$param_name}->{$property_name} ? JSON::true : JSON::false;
- }
- else {
- $param_value{$property_name} = $given_params{$param_name}->{$property_name};
- }
- }
- }
- else {
- while (my ($property_name, $property) =
- each %{$given_params{$param_name}}) {
- if (ref $property eq '' || $property eq undef) {
- $param_value{$property_name} = $property;
- }
- elsif (ref $property eq 'HASH') {
- $param_value{$property_name} = {};
- while (my ($k, $v) = each %$property) {
- $param_value{$property_name}->{$k} = $v;
- }
- }
- }
- }
- $body_params{$param_name} = \%param_value;
- } elsif ($param->{'type'} eq 'boolean') {
- $body_params{$param_name} = $given_params{$param_name} ? JSON::true : JSON::false;
- } else {
- $body_params{$param_name} = $given_params{$param_name};
- }
- }
- if (%extra_params) {
- croak("Unsupported parameter(s) passed to API call /$path: \"" . join('", "', keys %extra_params) . '"');
- }
- my $r = $self->{'resourceAccessor'}->{'api'}->new_request;
- my $base_uri = $self->{'resourceAccessor'}->{'api'}->{'discoveryDocument'}->{'baseUrl'};
- $base_uri =~ s:/$::;
- $r->set_uri($base_uri . "/" . $path);
- $r->set_method($method->{'httpMethod'});
- $r->set_auth_token($self->{'resourceAccessor'}->{'api'}->{'authToken'});
- $r->set_query_params(\%body_params) if %body_params;
- $r->process_request();
- my $data, $headers;
- my ($status_number, $status_phrase) = $r->get_status();
- if ($status_number != 200) {
- croak("API call /$path failed: $status_number $status_phrase\n". $r->get_body());
- }
- $data = $r->get_body();
- $headers = $r->get_headers();
- my $result = JSON::decode_json($data);
- if ($method->{'response'}->{'$ref'} =~ /List$/) {
- Arvados::ResourceProxyList->new($result, $self->{'resourceAccessor'});
- } else {
- Arvados::ResourceProxy->new($result, $self->{'resourceAccessor'});
- }
-}
-
-1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxy.pm b/sdk/perl/lib/Arvados/ResourceProxy.pm
deleted file mode 100644
index d3be46812e..0000000000
--- a/sdk/perl/lib/Arvados/ResourceProxy.pm
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-package Arvados::ResourceProxy;
-
-sub new
-{
- my $class = shift;
- my $self = shift;
- $self->{'resourceAccessor'} = shift;
- bless ($self, $class);
- $self;
-}
-
-sub save
-{
- my $self = shift;
- $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => $self);
- foreach my $param (keys %$self) {
- if (exists $response->{$param}) {
- $self->{$param} = $response->{$param};
- }
- }
- $self;
-}
-
-sub update_attributes
-{
- my $self = shift;
- my %updates = @_;
- $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => \%updates);
- foreach my $param (keys %updates) {
- if (exists $response->{$param}) {
- $self->{$param} = $response->{$param};
- }
- }
- $self;
-}
-
-sub reload
-{
- my $self = shift;
- $response = $self->{'resourceAccessor'}->{'get'}->execute('uuid' => $self->{'uuid'});
- foreach my $param (keys %$self) {
- if (exists $response->{$param}) {
- $self->{$param} = $response->{$param};
- }
- }
- $self;
-}
-
-sub resource_parameter_name
-{
- my $self = shift;
- my $pname = $self->{'resourceAccessor'}->{'resourcesName'};
- $pname =~ s/s$//; # XXX not a very good singularize()
- $pname;
-}
-
-1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxyList.pm b/sdk/perl/lib/Arvados/ResourceProxyList.pm
deleted file mode 100644
index 7d8e1874aa..0000000000
--- a/sdk/perl/lib/Arvados/ResourceProxyList.pm
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-package Arvados::ResourceProxyList;
-
-sub new
-{
- my $class = shift;
- my $self = {};
- bless ($self, $class);
- $self->_init(@_);
-}
-
-sub _init
-{
- my $self = shift;
- $self->{'serverResponse'} = shift;
- $self->{'resourceAccessor'} = shift;
- $self->{'items'} = [ map { Arvados::ResourceProxy->new($_, $self->{'resourceAccessor'}) } @{$self->{'serverResponse'}->{'items'}} ];
- $self;
-}
-
-1;
diff --git a/sdk/python/MANIFEST.in b/sdk/python/MANIFEST.in
index 50a29234be..2dba5819ee 100644
--- a/sdk/python/MANIFEST.in
+++ b/sdk/python/MANIFEST.in
@@ -4,4 +4,6 @@
include LICENSE-2.0.txt
include README.rst
-include arvados_version.py
\ No newline at end of file
+include arvados-v1-discovery.json
+include arvados_version.py
+include discovery2pydoc.py
diff --git a/sdk/python/README.rst b/sdk/python/README.rst
index 570e398a28..e40866c624 100644
--- a/sdk/python/README.rst
+++ b/sdk/python/README.rst
@@ -22,17 +22,29 @@ Installation
Installing under your user account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This method lets you install the package without root access.
-However, other users on the same system won't be able to use it.
+This method lets you install the package without root access. However,
+other users on the same system will need to reconfigure their shell in order
+to be able to use it. Run the following to install the package in an
+environment at ``~/arvclients``::
-1. Run ``pip install --user arvados-python-client``.
+ python3 -m venv ~/arvclients
+ ~/arvclients/bin/pip install arvados-python-client
-2. In your shell configuration, make sure you add ``$HOME/.local/bin``
- to your PATH environment variable. For example, you could add the
- command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+Command line tools will be installed under ``~/arvclients/bin``. You can
+test one by running::
-3. Reload your shell configuration. For example, bash users could run
- ``source ~/.bashrc``.
+ ~/arvclients/bin/arv-get --version
+
+You can run these tools by specifying the full path every time, or you can
+add the directory to your shell's search path by running::
+
+ export PATH="$PATH:$HOME/arvclients/bin"
+
+You can make this search path change permanent by adding this command to
+your shell's configuration, for example ``~/.bashrc`` if you're using bash.
+You can test the change by running::
+
+ arv-get --version
Installing on Debian systems
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -63,5 +75,5 @@ Testing and Development
This package is one part of the Arvados source package, and it has
integration tests to check interoperability with other Arvados
components. Our `hacking guide
-`_
+`_
describes how to set up a development environment and run tests.
diff --git a/sdk/python/arvados-v1-discovery.json b/sdk/python/arvados-v1-discovery.json
new file mode 100644
index 0000000000..232c88d067
--- /dev/null
+++ b/sdk/python/arvados-v1-discovery.json
@@ -0,0 +1,11322 @@
+{
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://api.arvados.org/auth/arvados": {
+ "description": "View and manage objects"
+ },
+ "https://api.arvados.org/auth/arvados.readonly": {
+ "description": "View objects"
+ }
+ }
+ }
+ },
+ "basePath": "/arvados/v1/",
+ "batchPath": "batch",
+ "description": "The API to interact with Arvados.",
+ "discoveryVersion": "v1",
+ "documentationLink": "http://doc.arvados.org/api/index.html",
+ "id": "arvados:v1",
+ "kind": "discovery#restDescription",
+ "name": "arvados",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ }
+ },
+ "protocol": "rest",
+ "resources": {
+ "api_clients": {
+ "methods": {
+ "get": {
+ "id": "arvados.api_clients.get",
+ "path": "api_clients/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a ApiClient's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClient in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.api_clients.list",
+ "path": "api_clients",
+ "httpMethod": "GET",
+ "description": "List ApiClients.\n\n The list
method returns a\n resource list of\n matching ApiClients. For example:\n\n \n {\n \"kind\":\"arvados#apiClientList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.api_clients.create",
+ "path": "api_clients",
+ "httpMethod": "POST",
+ "description": "Create a new ApiClient.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client": {
+ "$ref": "ApiClient"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.api_clients.update",
+ "path": "api_clients/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing ApiClient.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClient in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client": {
+ "$ref": "ApiClient"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.api_clients.delete",
+ "path": "api_clients/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing ApiClient.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClient in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.api_clients.list",
+ "path": "api_clients",
+ "httpMethod": "GET",
+ "description": "List ApiClients.\n\n The list
method returns a\n resource list of\n matching ApiClients. For example:\n\n \n {\n \"kind\":\"arvados#apiClientList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.api_clients.show",
+ "path": "api_clients/{uuid}",
+ "httpMethod": "GET",
+ "description": "show api_clients",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.api_clients.destroy",
+ "path": "api_clients/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy api_clients",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ApiClient"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "api_client_authorizations": {
+ "methods": {
+ "get": {
+ "id": "arvados.api_client_authorizations.get",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a ApiClientAuthorization's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.api_client_authorizations.list",
+ "path": "api_client_authorizations",
+ "httpMethod": "GET",
+ "description": "List ApiClientAuthorizations.\n\n The list
method returns a\n resource list of\n matching ApiClientAuthorizations. For example:\n\n \n {\n \"kind\":\"arvados#apiClientAuthorizationList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorizationList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.api_client_authorizations.create",
+ "path": "api_client_authorizations",
+ "httpMethod": "POST",
+ "description": "Create a new ApiClientAuthorization.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.api_client_authorizations.update",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing ApiClientAuthorization.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "api_client_authorization": {
+ "$ref": "ApiClientAuthorization"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.api_client_authorizations.delete",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing ApiClientAuthorization.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ApiClientAuthorization in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "create_system_auth": {
+ "id": "arvados.api_client_authorizations.create_system_auth",
+ "path": "api_client_authorizations/create_system_auth",
+ "httpMethod": "POST",
+ "description": "create_system_auth api_client_authorizations",
+ "parameters": {
+ "api_client_id": {
+ "type": "integer",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "scopes": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.api_client_authorizations.current",
+ "path": "api_client_authorizations/current",
+ "httpMethod": "GET",
+ "description": "current api_client_authorizations",
+ "parameters": {},
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.api_client_authorizations.list",
+ "path": "api_client_authorizations",
+ "httpMethod": "GET",
+        "description": "List ApiClientAuthorizations.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching ApiClientAuthorizations. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#apiClientAuthorizationList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorizationList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.api_client_authorizations.show",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "GET",
+ "description": "show api_client_authorizations",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.api_client_authorizations.destroy",
+ "path": "api_client_authorizations/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy api_client_authorizations",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ApiClientAuthorization"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "authorized_keys": {
+ "methods": {
+ "get": {
+ "id": "arvados.authorized_keys.get",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a AuthorizedKey's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.authorized_keys.list",
+ "path": "authorized_keys",
+ "httpMethod": "GET",
+        "description": "List AuthorizedKeys.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching AuthorizedKeys. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#authorizedKeyList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKeyList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.authorized_keys.create",
+ "path": "authorized_keys",
+ "httpMethod": "POST",
+ "description": "Create a new AuthorizedKey.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
+ }
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.authorized_keys.update",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing AuthorizedKey.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "authorized_key": {
+ "$ref": "AuthorizedKey"
+ }
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.authorized_keys.delete",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing AuthorizedKey.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the AuthorizedKey in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.authorized_keys.list",
+ "path": "authorized_keys",
+ "httpMethod": "GET",
+        "description": "List AuthorizedKeys.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching AuthorizedKeys. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#authorizedKeyList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKeyList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.authorized_keys.show",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "GET",
+ "description": "show authorized_keys",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.authorized_keys.destroy",
+ "path": "authorized_keys/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy authorized_keys",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "AuthorizedKey"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "collections": {
+ "methods": {
+ "get": {
+ "id": "arvados.collections.get",
+ "path": "collections/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Collection's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.collections.list",
+ "path": "collections",
+ "httpMethod": "GET",
+        "description": "List Collections.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching Collections. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#collectionList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include collections whose is_trashed attribute is true.",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include past collection versions.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "CollectionList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.collections.create",
+ "path": "collections",
+ "httpMethod": "POST",
+ "description": "Create a new Collection.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Files and directories to initialize/replace with content from other collections.",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.collections.update",
+ "path": "collections/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "replace_files": {
+ "type": "object",
+ "description": "Files and directories to initialize/replace with content from other collections.",
+ "required": false,
+ "location": "query",
+ "properties": {},
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "collection": {
+ "$ref": "Collection"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.collections.delete",
+ "path": "collections/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Collection.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Collection in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "provenance": {
+ "id": "arvados.collections.provenance",
+ "path": "collections/{uuid}/provenance",
+ "httpMethod": "GET",
+ "description": "provenance collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "used_by": {
+ "id": "arvados.collections.used_by",
+ "path": "collections/{uuid}/used_by",
+ "httpMethod": "GET",
+ "description": "used_by collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "trash": {
+ "id": "arvados.collections.trash",
+ "path": "collections/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "trash collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "untrash": {
+ "id": "arvados.collections.untrash",
+ "path": "collections/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "untrash collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.collections.list",
+ "path": "collections",
+ "httpMethod": "GET",
+        "description": "List Collections.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching Collections. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#collectionList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include collections whose is_trashed attribute is true.",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include past collection versions.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "CollectionList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.collections.show",
+ "path": "collections/{uuid}",
+ "httpMethod": "GET",
+ "description": "show collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show collection even if its is_trashed attribute is true.",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "true",
+ "description": "Include past collection versions.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.collections.destroy",
+ "path": "collections/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy collections",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Collection"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "containers": {
+ "methods": {
+ "get": {
+ "id": "arvados.containers.get",
+ "path": "containers/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Container's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.containers.list",
+ "path": "containers",
+ "httpMethod": "GET",
+        "description": "List Containers.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching Containers. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#containerList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.containers.create",
+ "path": "containers",
+ "httpMethod": "POST",
+ "description": "Create a new Container.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container": {
+ "$ref": "Container"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.containers.update",
+ "path": "containers/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container": {
+ "$ref": "Container"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.containers.delete",
+ "path": "containers/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Container.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Container in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "auth": {
+ "id": "arvados.containers.auth",
+ "path": "containers/{uuid}/auth",
+ "httpMethod": "GET",
+ "description": "auth containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "lock": {
+ "id": "arvados.containers.lock",
+ "path": "containers/{uuid}/lock",
+ "httpMethod": "POST",
+ "description": "lock containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "unlock": {
+ "id": "arvados.containers.unlock",
+ "path": "containers/{uuid}/unlock",
+ "httpMethod": "POST",
+ "description": "unlock containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update_priority": {
+ "id": "arvados.containers.update_priority",
+ "path": "containers/{uuid}/update_priority",
+ "httpMethod": "POST",
+ "description": "update_priority containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "secret_mounts": {
+ "id": "arvados.containers.secret_mounts",
+ "path": "containers/{uuid}/secret_mounts",
+ "httpMethod": "GET",
+ "description": "secret_mounts containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.containers.current",
+ "path": "containers/current",
+ "httpMethod": "GET",
+ "description": "current containers",
+ "parameters": {},
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.containers.list",
+ "path": "containers",
+ "httpMethod": "GET",
+        "description": "List Containers.\n\n                 The list\n                 method returns a\n                 resource list of\n                 matching Containers. For example:\n\n                 \n                 {\n                 \"kind\":\"arvados#containerList\",\n                 \"etag\":\"\",\n                 \"self_link\":\"\",\n                 \"next_page_token\":\"\",\n                 \"next_link\":\"\",\n                 \"items\":[\n                 ...\n                 ],\n                 \"items_available\":745,\n                 \"_profile\":{\n                 \"request_time\":0.157236317\n                 }\n                 ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.containers.show",
+ "path": "containers/{uuid}",
+ "httpMethod": "GET",
+ "description": "show containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.containers.destroy",
+ "path": "containers/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy containers",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Container"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "container_requests": {
+ "methods": {
+ "get": {
+ "id": "arvados.container_requests.get",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a ContainerRequest's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.container_requests.list",
+ "path": "container_requests",
+ "httpMethod": "GET",
+ "description": "List ContainerRequests.\n\n The list
method returns a\n resource list of\n matching ContainerRequests. For example:\n\n \n {\n \"kind\":\"arvados#containerRequestList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include container requests whose owner project is trashed.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequestList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.container_requests.create",
+ "path": "container_requests",
+ "httpMethod": "POST",
+ "description": "Create a new ContainerRequest.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container_request": {
+ "$ref": "ContainerRequest"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.container_requests.update",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing ContainerRequest.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "container_request": {
+ "$ref": "ContainerRequest"
+ }
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.container_requests.delete",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing ContainerRequest.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the ContainerRequest in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "container_status": {
+ "id": "arvados.container_requests.container_status",
+ "path": "container_requests/{uuid}/container_status",
+ "httpMethod": "GET",
+ "description": "container_status container_requests",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "required": true,
+ "description": "The UUID of the ContainerRequest in question.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.container_requests.list",
+ "path": "container_requests",
+ "httpMethod": "GET",
+ "description": "List ContainerRequests.\n\n The list
method returns a\n resource list of\n matching ContainerRequests. For example:\n\n \n {\n \"kind\":\"arvados#containerRequestList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include container requests whose owner project is trashed.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequestList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.container_requests.show",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "GET",
+ "description": "show container_requests",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show container request even if its owner project is trashed.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.container_requests.destroy",
+ "path": "container_requests/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy container_requests",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "ContainerRequest"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "groups": {
+ "methods": {
+ "get": {
+ "id": "arvados.groups.get",
+ "path": "groups/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Group's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.groups.list",
+ "path": "groups",
+ "httpMethod": "GET",
+ "description": "List Groups.\n\n The list
method returns a\n resource list of\n matching Groups. For example:\n\n \n {\n \"kind\":\"arvados#groupList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose is_trashed attribute is true.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "GroupList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.groups.create",
+ "path": "groups",
+ "httpMethod": "POST",
+ "description": "Create a new Group.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "defer permissions update"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "group": {
+ "$ref": "Group"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.groups.update",
+ "path": "groups/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "async": {
+ "required": false,
+ "type": "boolean",
+ "location": "query",
+ "default": "false",
+ "description": "defer permissions update"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "group": {
+ "$ref": "Group"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.groups.delete",
+ "path": "groups/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Group.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Group in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "contents": {
+ "id": "arvados.groups.contents",
+ "path": "groups/contents",
+ "httpMethod": "GET",
+ "description": "contents groups",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose is_trashed attribute is true.",
+ "location": "query"
+ },
+ "uuid": {
+ "type": "string",
+ "required": false,
+ "default": "",
+ "description": "",
+ "location": "query"
+ },
+ "recursive": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include contents from child groups recursively.",
+ "location": "query"
+ },
+ "include": {
+ "type": "string",
+ "required": false,
+ "description": "Include objects referred to by listed field in \"included\" (only owner_uuid).",
+ "location": "query"
+ },
+ "include_old_versions": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include past collection versions.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "shared": {
+ "id": "arvados.groups.shared",
+ "path": "groups/shared",
+ "httpMethod": "GET",
+ "description": "shared groups",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose is_trashed attribute is true.",
+ "location": "query"
+ },
+ "include": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "trash": {
+ "id": "arvados.groups.trash",
+ "path": "groups/{uuid}/trash",
+ "httpMethod": "POST",
+ "description": "trash groups",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "untrash": {
+ "id": "arvados.groups.untrash",
+ "path": "groups/{uuid}/untrash",
+ "httpMethod": "POST",
+ "description": "untrash groups",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.groups.list",
+ "path": "groups",
+ "httpMethod": "GET",
+ "description": "List Groups.\n\n The list
method returns a\n resource list of\n matching Groups. For example:\n\n \n {\n \"kind\":\"arvados#groupList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Include items whose is_trashed attribute is true.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "GroupList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.groups.show",
+ "path": "groups/{uuid}",
+ "httpMethod": "GET",
+ "description": "show groups",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "include_trash": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "Show group/project even if its is_trashed attribute is true.",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.groups.destroy",
+ "path": "groups/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy groups",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Group"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "humans": {
+ "methods": {
+ "get": {
+ "id": "arvados.humans.get",
+ "path": "humans/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Human's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Human in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.humans.list",
+ "path": "humans",
+ "httpMethod": "GET",
+ "description": "List Humans.\n\n The list
method returns a\n resource list of\n matching Humans. For example:\n\n \n {\n \"kind\":\"arvados#humanList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "HumanList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.humans.create",
+ "path": "humans",
+ "httpMethod": "POST",
+ "description": "Create a new Human.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "human": {
+ "$ref": "Human"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.humans.update",
+ "path": "humans/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Human.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Human in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "human": {
+ "$ref": "Human"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.humans.delete",
+ "path": "humans/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Human.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Human in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.humans.list",
+ "path": "humans",
+ "httpMethod": "GET",
+ "description": "List Humans.\n\n The list
method returns a\n resource list of\n matching Humans. For example:\n\n \n {\n \"kind\":\"arvados#humanList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "HumanList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.humans.show",
+ "path": "humans/{uuid}",
+ "httpMethod": "GET",
+ "description": "show humans",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.humans.destroy",
+ "path": "humans/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy humans",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Human"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "jobs": {
+ "methods": {
+ "get": {
+ "id": "arvados.jobs.get",
+ "path": "jobs/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Job's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Job in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.jobs.list",
+ "path": "jobs",
+ "httpMethod": "GET",
+ "description": "List Jobs.\n\n The list
method returns a\n resource list of\n matching Jobs. For example:\n\n \n {\n \"kind\":\"arvados#jobList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "JobList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.jobs.create",
+ "path": "jobs",
+ "httpMethod": "POST",
+ "description": "Create a new Job.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "find_or_create": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "minimum_script_version": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "exclude_script_versions": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "job": {
+ "$ref": "Job"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.jobs.update",
+ "path": "jobs/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Job.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Job in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "job": {
+ "$ref": "Job"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.jobs.delete",
+ "path": "jobs/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Job.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Job in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "queue": {
+ "id": "arvados.jobs.queue",
+ "path": "jobs/queue",
+ "httpMethod": "GET",
+ "description": "queue jobs",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "queue_size": {
+ "id": "arvados.jobs.queue_size",
+ "path": "jobs/queue_size",
+ "httpMethod": "GET",
+ "description": "queue_size jobs",
+ "parameters": {},
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "cancel": {
+ "id": "arvados.jobs.cancel",
+ "path": "jobs/{uuid}/cancel",
+ "httpMethod": "POST",
+ "description": "cancel jobs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "lock": {
+ "id": "arvados.jobs.lock",
+ "path": "jobs/{uuid}/lock",
+ "httpMethod": "POST",
+ "description": "lock jobs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.jobs.list",
+ "path": "jobs",
+ "httpMethod": "GET",
+ "description": "List Jobs.\n\n The list
method returns a\n resource list of\n matching Jobs. For example:\n\n \n {\n \"kind\":\"arvados#jobList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "JobList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.jobs.show",
+ "path": "jobs/{uuid}",
+ "httpMethod": "GET",
+ "description": "show jobs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.jobs.destroy",
+ "path": "jobs/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy jobs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "job_tasks": {
+ "methods": {
+ "get": {
+ "id": "arvados.job_tasks.get",
+ "path": "job_tasks/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a JobTask's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the JobTask in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.job_tasks.list",
+ "path": "job_tasks",
+ "httpMethod": "GET",
+ "description": "List JobTasks.\n\n The list
method returns a\n resource list of\n matching JobTasks. For example:\n\n \n {\n \"kind\":\"arvados#jobTaskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "JobTaskList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.job_tasks.create",
+ "path": "job_tasks",
+ "httpMethod": "POST",
+ "description": "Create a new JobTask.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "job_task": {
+ "$ref": "JobTask"
+ }
+ }
+ },
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.job_tasks.update",
+ "path": "job_tasks/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing JobTask.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the JobTask in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "job_task": {
+ "$ref": "JobTask"
+ }
+ }
+ },
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.job_tasks.delete",
+ "path": "job_tasks/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing JobTask.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the JobTask in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.job_tasks.list",
+ "path": "job_tasks",
+ "httpMethod": "GET",
+ "description": "List JobTasks.\n\n The list
method returns a\n resource list of\n matching JobTasks. For example:\n\n \n {\n \"kind\":\"arvados#jobTaskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "JobTaskList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.job_tasks.show",
+ "path": "job_tasks/{uuid}",
+ "httpMethod": "GET",
+ "description": "show job_tasks",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.job_tasks.destroy",
+ "path": "job_tasks/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy job_tasks",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "JobTask"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "keep_disks": {
+ "methods": {
+ "get": {
+ "id": "arvados.keep_disks.get",
+ "path": "keep_disks/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a KeepDisk's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepDisk in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.keep_disks.list",
+ "path": "keep_disks",
+ "httpMethod": "GET",
+ "description": "List KeepDisks.\n\n The list
method returns a\n resource list of\n matching KeepDisks. For example:\n\n \n {\n \"kind\":\"arvados#keepDiskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepDiskList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.keep_disks.create",
+ "path": "keep_disks",
+ "httpMethod": "POST",
+ "description": "Create a new KeepDisk.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_disk": {
+ "$ref": "KeepDisk"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.keep_disks.update",
+ "path": "keep_disks/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing KeepDisk.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepDisk in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_disk": {
+ "$ref": "KeepDisk"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.keep_disks.delete",
+ "path": "keep_disks/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing KeepDisk.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepDisk in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "ping": {
+ "id": "arvados.keep_disks.ping",
+ "path": "keep_disks/ping",
+ "httpMethod": "POST",
+ "description": "ping keep_disks",
+ "parameters": {
+ "uuid": {
+ "required": false,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "ping_secret": {
+ "required": true,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "node_uuid": {
+ "required": false,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "filesystem_uuid": {
+ "required": false,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "service_host": {
+ "required": false,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "service_port": {
+ "required": true,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ },
+ "service_ssl_flag": {
+ "required": true,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.keep_disks.list",
+ "path": "keep_disks",
+ "httpMethod": "GET",
+ "description": "List KeepDisks.\n\n The list
method returns a\n resource list of\n matching KeepDisks. For example:\n\n \n {\n \"kind\":\"arvados#keepDiskList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepDiskList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.keep_disks.show",
+ "path": "keep_disks/{uuid}",
+ "httpMethod": "GET",
+ "description": "show keep_disks",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.keep_disks.destroy",
+ "path": "keep_disks/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy keep_disks",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "KeepDisk"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "keep_services": {
+ "methods": {
+ "get": {
+ "id": "arvados.keep_services.get",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a KeepService's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.keep_services.list",
+ "path": "keep_services",
+ "httpMethod": "GET",
+ "description": "List KeepServices.\n\n The list
method returns a\n resource list of\n matching KeepServices. For example:\n\n \n {\n \"kind\":\"arvados#keepServiceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepServiceList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.keep_services.create",
+ "path": "keep_services",
+ "httpMethod": "POST",
+ "description": "Create a new KeepService.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_service": {
+ "$ref": "KeepService"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.keep_services.update",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing KeepService.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "keep_service": {
+ "$ref": "KeepService"
+ }
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.keep_services.delete",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing KeepService.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the KeepService in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "accessible": {
+ "id": "arvados.keep_services.accessible",
+ "path": "keep_services/accessible",
+ "httpMethod": "GET",
+ "description": "accessible keep_services",
+ "parameters": {},
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.keep_services.list",
+ "path": "keep_services",
+ "httpMethod": "GET",
+ "description": "List KeepServices.\n\n The list
method returns a\n resource list of\n matching KeepServices. For example:\n\n \n {\n \"kind\":\"arvados#keepServiceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepServiceList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.keep_services.show",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "GET",
+ "description": "show keep_services",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.keep_services.destroy",
+ "path": "keep_services/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy keep_services",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "KeepService"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "links": {
+ "methods": {
+ "get": {
+ "id": "arvados.links.get",
+ "path": "links/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Link's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.links.list",
+ "path": "links",
+ "httpMethod": "GET",
+ "description": "List Links.\n\n The list
method returns a\n resource list of\n matching Links. For example:\n\n \n {\n \"kind\":\"arvados#linkList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LinkList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.links.create",
+ "path": "links",
+ "httpMethod": "POST",
+ "description": "Create a new Link.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.links.update",
+ "path": "links/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Link.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "link": {
+ "$ref": "Link"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.links.delete",
+ "path": "links/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Link.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Link in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.links.list",
+ "path": "links",
+ "httpMethod": "GET",
+ "description": "List Links.\n\n The list
method returns a\n resource list of\n matching Links. For example:\n\n \n {\n \"kind\":\"arvados#linkList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LinkList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.links.show",
+ "path": "links/{uuid}",
+ "httpMethod": "GET",
+ "description": "show links",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.links.destroy",
+ "path": "links/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy links",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_permissions": {
+ "id": "arvados.links.get_permissions",
+ "path": "permissions/{uuid}",
+ "httpMethod": "GET",
+ "description": "get_permissions links",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Link"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "logs": {
+ "methods": {
+ "get": {
+ "id": "arvados.logs.get",
+ "path": "logs/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Log's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.logs.list",
+ "path": "logs",
+ "httpMethod": "GET",
+ "description": "List Logs.\n\n The list
method returns a\n resource list of\n matching Logs. For example:\n\n \n {\n \"kind\":\"arvados#logList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LogList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.logs.create",
+ "path": "logs",
+ "httpMethod": "POST",
+ "description": "Create a new Log.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "log": {
+ "$ref": "Log"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.logs.update",
+ "path": "logs/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Log.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "log": {
+ "$ref": "Log"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.logs.delete",
+ "path": "logs/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Log.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Log in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.logs.list",
+ "path": "logs",
+ "httpMethod": "GET",
+ "description": "List Logs.\n\n The list
method returns a\n resource list of\n matching Logs. For example:\n\n \n {\n \"kind\":\"arvados#logList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "LogList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.logs.show",
+ "path": "logs/{uuid}",
+ "httpMethod": "GET",
+ "description": "show logs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.logs.destroy",
+ "path": "logs/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy logs",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Log"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "nodes": {
+ "methods": {
+ "get": {
+ "id": "arvados.nodes.get",
+ "path": "nodes/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Node's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Node in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.nodes.list",
+ "path": "nodes",
+ "httpMethod": "GET",
+ "description": "List Nodes.\n\n The list
method returns a\n resource list of\n matching Nodes. For example:\n\n \n {\n \"kind\":\"arvados#nodeList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "NodeList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.nodes.create",
+ "path": "nodes",
+ "httpMethod": "POST",
+ "description": "Create a new Node.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "assign_slot": {
+ "required": false,
+ "type": "boolean",
+ "description": "assign slot and hostname",
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "node": {
+ "$ref": "Node"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.nodes.update",
+ "path": "nodes/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Node.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Node in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "assign_slot": {
+ "required": false,
+ "type": "boolean",
+ "description": "assign slot and hostname",
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "node": {
+ "$ref": "Node"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.nodes.delete",
+ "path": "nodes/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Node.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Node in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "ping": {
+ "id": "arvados.nodes.ping",
+ "path": "nodes/{uuid}/ping",
+ "httpMethod": "POST",
+ "description": "ping nodes",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "ping_secret": {
+ "required": true,
+ "type": "string",
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.nodes.list",
+ "path": "nodes",
+ "httpMethod": "GET",
+ "description": "List Nodes.\n\n The list
method returns a\n resource list of\n matching Nodes. For example:\n\n \n {\n \"kind\":\"arvados#nodeList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "NodeList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.nodes.show",
+ "path": "nodes/{uuid}",
+ "httpMethod": "GET",
+ "description": "show nodes",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.nodes.destroy",
+ "path": "nodes/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy nodes",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Node"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "pipeline_instances": {
+ "methods": {
+ "get": {
+ "id": "arvados.pipeline_instances.get",
+ "path": "pipeline_instances/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a PipelineInstance's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineInstance in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.pipeline_instances.list",
+ "path": "pipeline_instances",
+ "httpMethod": "GET",
+ "description": "List PipelineInstances.\n\n The list
method returns a\n resource list of\n matching PipelineInstances. For example:\n\n \n {\n \"kind\":\"arvados#pipelineInstanceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstanceList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.pipeline_instances.create",
+ "path": "pipeline_instances",
+ "httpMethod": "POST",
+ "description": "Create a new PipelineInstance.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "pipeline_instance": {
+ "$ref": "PipelineInstance"
+ }
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.pipeline_instances.update",
+ "path": "pipeline_instances/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing PipelineInstance.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineInstance in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "pipeline_instance": {
+ "$ref": "PipelineInstance"
+ }
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.pipeline_instances.delete",
+ "path": "pipeline_instances/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing PipelineInstance.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineInstance in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "cancel": {
+ "id": "arvados.pipeline_instances.cancel",
+ "path": "pipeline_instances/{uuid}/cancel",
+ "httpMethod": "POST",
+ "description": "cancel pipeline_instances",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.pipeline_instances.list",
+ "path": "pipeline_instances",
+ "httpMethod": "GET",
+ "description": "List PipelineInstances.\n\n The list
method returns a\n resource list of\n matching PipelineInstances. For example:\n\n \n {\n \"kind\":\"arvados#pipelineInstanceList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstanceList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.pipeline_instances.show",
+ "path": "pipeline_instances/{uuid}",
+ "httpMethod": "GET",
+ "description": "show pipeline_instances",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.pipeline_instances.destroy",
+ "path": "pipeline_instances/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy pipeline_instances",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "PipelineInstance"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "pipeline_templates": {
+ "methods": {
+ "get": {
+ "id": "arvados.pipeline_templates.get",
+ "path": "pipeline_templates/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a PipelineTemplate's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineTemplate in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.pipeline_templates.list",
+ "path": "pipeline_templates",
+ "httpMethod": "GET",
+ "description": "List PipelineTemplates.\n\n The list
method returns a\n resource list of\n matching PipelineTemplates. For example:\n\n \n {\n \"kind\":\"arvados#pipelineTemplateList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplateList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.pipeline_templates.create",
+ "path": "pipeline_templates",
+ "httpMethod": "POST",
+ "description": "Create a new PipelineTemplate.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "pipeline_template": {
+ "$ref": "PipelineTemplate"
+ }
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.pipeline_templates.update",
+ "path": "pipeline_templates/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing PipelineTemplate.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineTemplate in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "pipeline_template": {
+ "$ref": "PipelineTemplate"
+ }
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.pipeline_templates.delete",
+ "path": "pipeline_templates/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing PipelineTemplate.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the PipelineTemplate in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.pipeline_templates.list",
+ "path": "pipeline_templates",
+ "httpMethod": "GET",
+ "description": "List PipelineTemplates.\n\n The list
method returns a\n resource list of\n matching PipelineTemplates. For example:\n\n \n {\n \"kind\":\"arvados#pipelineTemplateList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplateList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.pipeline_templates.show",
+ "path": "pipeline_templates/{uuid}",
+ "httpMethod": "GET",
+ "description": "show pipeline_templates",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.pipeline_templates.destroy",
+ "path": "pipeline_templates/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy pipeline_templates",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "PipelineTemplate"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "repositories": {
+ "methods": {
+ "get": {
+ "id": "arvados.repositories.get",
+ "path": "repositories/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Repository's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Repository in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.repositories.list",
+ "path": "repositories",
+ "httpMethod": "GET",
+ "description": "List Repositories.\n\n The list
method returns a\n resource list of\n matching Repositories. For example:\n\n \n {\n \"kind\":\"arvados#repositoryList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "RepositoryList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.repositories.create",
+ "path": "repositories",
+ "httpMethod": "POST",
+ "description": "Create a new Repository.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "repository": {
+ "$ref": "Repository"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.repositories.update",
+ "path": "repositories/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Repository.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Repository in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "repository": {
+ "$ref": "Repository"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.repositories.delete",
+ "path": "repositories/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Repository.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Repository in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_all_permissions": {
+ "id": "arvados.repositories.get_all_permissions",
+ "path": "repositories/get_all_permissions",
+ "httpMethod": "GET",
+ "description": "get_all_permissions repositories",
+ "parameters": {},
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.repositories.list",
+ "path": "repositories",
+ "httpMethod": "GET",
+ "description": "List Repositories.\n\n The list
method returns a\n resource list of\n matching Repositories. For example:\n\n \n {\n \"kind\":\"arvados#repositoryList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "RepositoryList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.repositories.show",
+ "path": "repositories/{uuid}",
+ "httpMethod": "GET",
+ "description": "show repositories",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.repositories.destroy",
+ "path": "repositories/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy repositories",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Repository"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "specimens": {
+ "methods": {
+ "get": {
+ "id": "arvados.specimens.get",
+ "path": "specimens/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Specimen's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Specimen in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.specimens.list",
+ "path": "specimens",
+ "httpMethod": "GET",
+ "description": "List Specimens.\n\n The list
method returns a\n resource list of\n matching Specimens. For example:\n\n \n {\n \"kind\":\"arvados#specimenList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "SpecimenList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.specimens.create",
+ "path": "specimens",
+ "httpMethod": "POST",
+ "description": "Create a new Specimen.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "specimen": {
+ "$ref": "Specimen"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.specimens.update",
+ "path": "specimens/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Specimen.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Specimen in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "specimen": {
+ "$ref": "Specimen"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.specimens.delete",
+ "path": "specimens/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Specimen.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Specimen in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.specimens.list",
+ "path": "specimens",
+ "httpMethod": "GET",
+ "description": "List Specimens.\n\n The list
method returns a\n resource list of\n matching Specimens. For example:\n\n \n {\n \"kind\":\"arvados#specimenList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "SpecimenList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.specimens.show",
+ "path": "specimens/{uuid}",
+ "httpMethod": "GET",
+ "description": "show specimens",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.specimens.destroy",
+ "path": "specimens/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy specimens",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Specimen"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "traits": {
+ "methods": {
+ "get": {
+ "id": "arvados.traits.get",
+ "path": "traits/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Trait's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Trait in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.traits.list",
+ "path": "traits",
+ "httpMethod": "GET",
+ "description": "List Traits.\n\n The list
method returns a\n resource list of\n matching Traits. For example:\n\n \n {\n \"kind\":\"arvados#traitList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "TraitList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.traits.create",
+ "path": "traits",
+ "httpMethod": "POST",
+ "description": "Create a new Trait.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "trait": {
+ "$ref": "Trait"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.traits.update",
+ "path": "traits/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Trait.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Trait in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "trait": {
+ "$ref": "Trait"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.traits.delete",
+ "path": "traits/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Trait.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Trait in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.traits.list",
+ "path": "traits",
+ "httpMethod": "GET",
+ "description": "List Traits.\n\n The list
method returns a\n resource list of\n matching Traits. For example:\n\n \n {\n \"kind\":\"arvados#traitList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "TraitList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.traits.show",
+ "path": "traits/{uuid}",
+ "httpMethod": "GET",
+ "description": "show traits",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.traits.destroy",
+ "path": "traits/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy traits",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Trait"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "users": {
+ "methods": {
+ "get": {
+ "id": "arvados.users.get",
+ "path": "users/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a User's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.users.list",
+ "path": "users",
+ "httpMethod": "GET",
+ "description": "List Users.\n\n The list
method returns a\n resource list of\n matching Users. For example:\n\n \n {\n \"kind\":\"arvados#userList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.users.create",
+ "path": "users",
+ "httpMethod": "POST",
+ "description": "Create a new User.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.users.update",
+ "path": "users/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing User.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user": {
+ "$ref": "User"
+ }
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.users.delete",
+ "path": "users/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing User.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the User in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "current": {
+ "id": "arvados.users.current",
+ "path": "users/current",
+ "httpMethod": "GET",
+ "description": "current users",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "system": {
+ "id": "arvados.users.system",
+ "path": "users/system",
+ "httpMethod": "GET",
+ "description": "system users",
+ "parameters": {},
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "activate": {
+ "id": "arvados.users.activate",
+ "path": "users/{uuid}/activate",
+ "httpMethod": "POST",
+ "description": "activate users",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "setup": {
+ "id": "arvados.users.setup",
+ "path": "users/setup",
+ "httpMethod": "POST",
+ "description": "setup users",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "user": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "repo_name": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "vm_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "send_notification_email": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "unsetup": {
+ "id": "arvados.users.unsetup",
+ "path": "users/{uuid}/unsetup",
+ "httpMethod": "POST",
+ "description": "unsetup users",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "merge": {
+ "id": "arvados.users.merge",
+ "path": "users/merge",
+ "httpMethod": "POST",
+ "description": "merge users",
+ "parameters": {
+ "new_owner_uuid": {
+ "type": "string",
+ "required": true,
+ "description": "",
+ "location": "query"
+ },
+ "new_user_token": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "redirect_to_new_user": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "old_user_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "new_user_uuid": {
+ "type": "string",
+ "required": false,
+ "description": "",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.users.list",
+ "path": "users",
+ "httpMethod": "GET",
+        "description": "List Users.\n\n The list method returns a\n resource list of\n matching Users. For example:\n\n \n {\n \"kind\":\"arvados#userList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.users.show",
+ "path": "users/{uuid}",
+ "httpMethod": "GET",
+ "description": "show users",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.users.destroy",
+ "path": "users/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy users",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "User"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "user_agreements": {
+ "methods": {
+ "get": {
+ "id": "arvados.user_agreements.get",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a UserAgreement's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.user_agreements.list",
+ "path": "user_agreements",
+ "httpMethod": "GET",
+        "description": "List UserAgreements.\n\n The list method returns a\n resource list of\n matching UserAgreements. For example:\n\n \n {\n \"kind\":\"arvados#userAgreementList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreementList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.user_agreements.create",
+ "path": "user_agreements",
+ "httpMethod": "POST",
+ "description": "Create a new UserAgreement.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user_agreement": {
+ "$ref": "UserAgreement"
+ }
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.user_agreements.update",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing UserAgreement.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "user_agreement": {
+ "$ref": "UserAgreement"
+ }
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.user_agreements.delete",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing UserAgreement.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the UserAgreement in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "signatures": {
+ "id": "arvados.user_agreements.signatures",
+ "path": "user_agreements/signatures",
+ "httpMethod": "GET",
+ "description": "signatures user_agreements",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "sign": {
+ "id": "arvados.user_agreements.sign",
+ "path": "user_agreements/sign",
+ "httpMethod": "POST",
+ "description": "sign user_agreements",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.user_agreements.list",
+ "path": "user_agreements",
+ "httpMethod": "GET",
+        "description": "List UserAgreements.\n\n The list method returns a\n resource list of\n matching UserAgreements. For example:\n\n \n {\n \"kind\":\"arvados#userAgreementList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreementList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "new": {
+ "id": "arvados.user_agreements.new",
+ "path": "user_agreements/new",
+ "httpMethod": "GET",
+ "description": "new user_agreements",
+ "parameters": {},
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "show": {
+ "id": "arvados.user_agreements.show",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "GET",
+ "description": "show user_agreements",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.user_agreements.destroy",
+ "path": "user_agreements/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy user_agreements",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "UserAgreement"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "virtual_machines": {
+ "methods": {
+ "get": {
+ "id": "arvados.virtual_machines.get",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a VirtualMachine's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.virtual_machines.list",
+ "path": "virtual_machines",
+ "httpMethod": "GET",
+        "description": "List VirtualMachines.\n\n The list method returns a\n resource list of\n matching VirtualMachines. For example:\n\n \n {\n \"kind\":\"arvados#virtualMachineList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachineList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.virtual_machines.create",
+ "path": "virtual_machines",
+ "httpMethod": "POST",
+ "description": "Create a new VirtualMachine.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.virtual_machines.update",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing VirtualMachine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "virtual_machine": {
+ "$ref": "VirtualMachine"
+ }
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.virtual_machines.delete",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing VirtualMachine.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the VirtualMachine in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "logins": {
+ "id": "arvados.virtual_machines.logins",
+ "path": "virtual_machines/{uuid}/logins",
+ "httpMethod": "GET",
+ "description": "logins virtual_machines",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "get_all_logins": {
+ "id": "arvados.virtual_machines.get_all_logins",
+ "path": "virtual_machines/get_all_logins",
+ "httpMethod": "GET",
+ "description": "get_all_logins virtual_machines",
+ "parameters": {},
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.virtual_machines.list",
+ "path": "virtual_machines",
+ "httpMethod": "GET",
+        "description": "List VirtualMachines.\n\n The list method returns a\n resource list of\n matching VirtualMachines. For example:\n\n \n {\n \"kind\":\"arvados#virtualMachineList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachineList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.virtual_machines.show",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "GET",
+ "description": "show virtual_machines",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.virtual_machines.destroy",
+ "path": "virtual_machines/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy virtual_machines",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "VirtualMachine"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "workflows": {
+ "methods": {
+ "get": {
+ "id": "arvados.workflows.get",
+ "path": "workflows/{uuid}",
+ "httpMethod": "GET",
+ "description": "Gets a Workflow's metadata by UUID.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "uuid"
+ ],
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "index": {
+ "id": "arvados.workflows.list",
+ "path": "workflows",
+ "httpMethod": "GET",
+        "description": "List Workflows.\n\n The list method returns a\n resource list of\n matching Workflows. For example:\n\n \n {\n \"kind\":\"arvados#workflowList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "WorkflowList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "create": {
+ "id": "arvados.workflows.create",
+ "path": "workflows",
+ "httpMethod": "POST",
+ "description": "Create a new Workflow.",
+ "parameters": {
+ "select": {
+ "type": "array",
+ "description": "Attributes of the new object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "ensure_unique_name": {
+ "type": "boolean",
+ "description": "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+ "location": "query",
+ "required": false,
+ "default": "false"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "Create object on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "workflow": {
+ "$ref": "Workflow"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "update": {
+ "id": "arvados.workflows.update",
+ "path": "workflows/{uuid}",
+ "httpMethod": "PUT",
+ "description": "Update attributes of an existing Workflow.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow in question.",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the updated object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "request": {
+ "required": true,
+ "properties": {
+ "workflow": {
+ "$ref": "Workflow"
+ }
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "delete": {
+ "id": "arvados.workflows.delete",
+ "path": "workflows/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "Delete an existing Workflow.",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "The UUID of the Workflow in question.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "list": {
+ "id": "arvados.workflows.list",
+ "path": "workflows",
+ "httpMethod": "GET",
+        "description": "List Workflows.\n\n The list method returns a\n resource list of\n matching Workflows. For example:\n\n \n {\n \"kind\":\"arvados#workflowList\",\n \"etag\":\"\",\n \"self_link\":\"\",\n \"next_page_token\":\"\",\n \"next_link\":\"\",\n \"items\":[\n ...\n ],\n \"items_available\":745,\n \"_profile\":{\n \"request_time\":0.157236317\n }\n ",
+ "parameters": {
+ "filters": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "where": {
+ "type": "object",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "order": {
+ "type": "array",
+ "required": false,
+ "description": "",
+ "location": "query"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of each object to return in the response.",
+ "required": false,
+ "location": "query"
+ },
+ "distinct": {
+ "type": "boolean",
+ "required": false,
+ "default": "false",
+ "description": "",
+ "location": "query"
+ },
+ "limit": {
+ "type": "integer",
+ "required": false,
+ "default": "100",
+ "description": "",
+ "location": "query"
+ },
+ "offset": {
+ "type": "integer",
+ "required": false,
+ "default": "0",
+ "description": "",
+ "location": "query"
+ },
+ "count": {
+ "type": "string",
+ "required": false,
+ "default": "exact",
+ "description": "",
+ "location": "query"
+ },
+ "cluster_id": {
+ "type": "string",
+ "description": "List objects on a remote federated cluster instead of the current one.",
+ "location": "query",
+ "required": false
+ },
+ "bypass_federation": {
+ "type": "boolean",
+ "required": false,
+ "description": "bypass federation behavior, list items from local instance database only",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "WorkflowList"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ "show": {
+ "id": "arvados.workflows.show",
+ "path": "workflows/{uuid}",
+ "httpMethod": "GET",
+ "description": "show workflows",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ },
+ "select": {
+ "type": "array",
+ "description": "Attributes of the object to return in the response.",
+ "required": false,
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ "destroy": {
+ "id": "arvados.workflows.destroy",
+ "path": "workflows/{uuid}",
+ "httpMethod": "DELETE",
+ "description": "destroy workflows",
+ "parameters": {
+ "uuid": {
+ "type": "string",
+ "description": "",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "response": {
+ "$ref": "Workflow"
+ },
+ "scopes": [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ },
+ "configs": {
+ "methods": {
+ "get": {
+ "id": "arvados.configs.get",
+ "path": "config",
+ "httpMethod": "GET",
+ "description": "Get public config",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
+ "vocabularies": {
+ "methods": {
+ "get": {
+ "id": "arvados.vocabularies.get",
+ "path": "vocabulary",
+ "httpMethod": "GET",
+ "description": "Get vocabulary definition",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ },
+ "sys": {
+ "methods": {
+ "get": {
+ "id": "arvados.sys.trash_sweep",
+ "path": "sys/trash_sweep",
+ "httpMethod": "POST",
+ "description": "apply scheduled trash and delete operations",
+ "parameters": {},
+ "parameterOrder": [],
+ "response": {},
+ "scopes": [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ }
+ }
+ }
+ },
+ "revision": "20231117",
+ "schemas": {
+ "ApiClientList": {
+ "id": "ApiClientList",
+ "description": "ApiClient list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#apiClientList.",
+ "default": "arvados#apiClientList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of ApiClients.",
+ "items": {
+ "$ref": "ApiClient"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of ApiClients."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of ApiClients."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "ApiClient": {
+ "id": "ApiClient",
+ "description": "ApiClient",
+ "type": "object",
+ "uuidPrefix": "ozdt8",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "url_prefix": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "is_trusted": {
+ "type": "boolean"
+ }
+ }
+ },
+ "ApiClientAuthorizationList": {
+ "id": "ApiClientAuthorizationList",
+ "description": "ApiClientAuthorization list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#apiClientAuthorizationList.",
+ "default": "arvados#apiClientAuthorizationList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of ApiClientAuthorizations.",
+ "items": {
+ "$ref": "ApiClientAuthorization"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of ApiClientAuthorizations."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of ApiClientAuthorizations."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "ApiClientAuthorization": {
+ "id": "ApiClientAuthorization",
+ "description": "ApiClientAuthorization",
+ "type": "object",
+ "uuidPrefix": "gj3su",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "api_token": {
+ "type": "string"
+ },
+ "api_client_id": {
+ "type": "integer"
+ },
+ "user_id": {
+ "type": "integer"
+ },
+ "created_by_ip_address": {
+ "type": "string"
+ },
+ "last_used_by_ip_address": {
+ "type": "string"
+ },
+ "last_used_at": {
+ "type": "datetime"
+ },
+ "expires_at": {
+ "type": "datetime"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "default_owner_uuid": {
+ "type": "string"
+ },
+ "scopes": {
+ "type": "Array"
+ }
+ }
+ },
+ "AuthorizedKeyList": {
+ "id": "AuthorizedKeyList",
+ "description": "AuthorizedKey list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#authorizedKeyList.",
+ "default": "arvados#authorizedKeyList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of AuthorizedKeys.",
+ "items": {
+ "$ref": "AuthorizedKey"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of AuthorizedKeys."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of AuthorizedKeys."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "AuthorizedKey": {
+ "id": "AuthorizedKey",
+ "description": "AuthorizedKey",
+ "type": "object",
+ "uuidPrefix": "fngyi",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "key_type": {
+ "type": "string"
+ },
+ "authorized_user_uuid": {
+ "type": "string"
+ },
+ "public_key": {
+ "type": "text"
+ },
+ "expires_at": {
+ "type": "datetime"
+ },
+ "created_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "CollectionList": {
+ "id": "CollectionList",
+ "description": "Collection list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#collectionList.",
+ "default": "arvados#collectionList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Collections.",
+ "items": {
+ "$ref": "Collection"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Collections."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Collections."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Collection": {
+ "id": "Collection",
+ "description": "Collection",
+ "type": "object",
+ "uuidPrefix": "4zz18",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "portable_data_hash": {
+ "type": "string"
+ },
+ "replication_desired": {
+ "type": "integer"
+ },
+ "replication_confirmed_at": {
+ "type": "datetime"
+ },
+ "replication_confirmed": {
+ "type": "integer"
+ },
+ "manifest_text": {
+ "type": "text"
+ },
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "delete_at": {
+ "type": "datetime"
+ },
+ "trash_at": {
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "type": "boolean"
+ },
+ "storage_classes_desired": {
+ "type": "Array"
+ },
+ "storage_classes_confirmed": {
+ "type": "Array"
+ },
+ "storage_classes_confirmed_at": {
+ "type": "datetime"
+ },
+ "current_version_uuid": {
+ "type": "string"
+ },
+ "version": {
+ "type": "integer"
+ },
+ "preserve_version": {
+ "type": "boolean"
+ },
+ "file_count": {
+ "type": "integer"
+ },
+ "file_size_total": {
+ "type": "integer"
+ }
+ }
+ },
+ "ContainerList": {
+ "id": "ContainerList",
+ "description": "Container list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#containerList.",
+ "default": "arvados#containerList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Containers.",
+ "items": {
+ "$ref": "Container"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Containers."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Containers."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Container": {
+ "id": "Container",
+ "description": "Container",
+ "type": "object",
+ "uuidPrefix": "dz642",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "state": {
+ "type": "string"
+ },
+ "started_at": {
+ "type": "datetime"
+ },
+ "finished_at": {
+ "type": "datetime"
+ },
+ "log": {
+ "type": "string"
+ },
+ "environment": {
+ "type": "Hash"
+ },
+ "cwd": {
+ "type": "string"
+ },
+ "command": {
+ "type": "Array"
+ },
+ "output_path": {
+ "type": "string"
+ },
+ "mounts": {
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "type": "Hash"
+ },
+ "output": {
+ "type": "string"
+ },
+ "container_image": {
+ "type": "string"
+ },
+ "progress": {
+ "type": "float"
+ },
+ "priority": {
+ "type": "integer"
+ },
+ "exit_code": {
+ "type": "integer"
+ },
+ "auth_uuid": {
+ "type": "string"
+ },
+ "locked_by_uuid": {
+ "type": "string"
+ },
+ "scheduling_parameters": {
+ "type": "Hash"
+ },
+ "runtime_status": {
+ "type": "Hash"
+ },
+ "runtime_user_uuid": {
+ "type": "text"
+ },
+ "runtime_auth_scopes": {
+ "type": "Array"
+ },
+ "lock_count": {
+ "type": "integer"
+ },
+ "gateway_address": {
+ "type": "string"
+ },
+ "interactive_session_started": {
+ "type": "boolean"
+ },
+ "output_storage_classes": {
+ "type": "Array"
+ },
+ "output_properties": {
+ "type": "Hash"
+ },
+ "cost": {
+ "type": "float"
+ },
+ "subrequests_cost": {
+ "type": "float"
+ }
+ }
+ },
+ "ContainerRequestList": {
+ "id": "ContainerRequestList",
+ "description": "ContainerRequest list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#containerRequestList.",
+ "default": "arvados#containerRequestList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of ContainerRequests.",
+ "items": {
+ "$ref": "ContainerRequest"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of ContainerRequests."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of ContainerRequests."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "ContainerRequest": {
+ "id": "ContainerRequest",
+ "description": "ContainerRequest",
+ "type": "object",
+ "uuidPrefix": "xvhdp",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "text"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "state": {
+ "type": "string"
+ },
+ "requesting_container_uuid": {
+ "type": "string"
+ },
+ "container_uuid": {
+ "type": "string"
+ },
+ "container_count_max": {
+ "type": "integer"
+ },
+ "mounts": {
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "type": "Hash"
+ },
+ "container_image": {
+ "type": "string"
+ },
+ "environment": {
+ "type": "Hash"
+ },
+ "cwd": {
+ "type": "string"
+ },
+ "command": {
+ "type": "Array"
+ },
+ "output_path": {
+ "type": "string"
+ },
+ "priority": {
+ "type": "integer"
+ },
+ "expires_at": {
+ "type": "datetime"
+ },
+ "filters": {
+ "type": "text"
+ },
+ "container_count": {
+ "type": "integer"
+ },
+ "use_existing": {
+ "type": "boolean"
+ },
+ "scheduling_parameters": {
+ "type": "Hash"
+ },
+ "output_uuid": {
+ "type": "string"
+ },
+ "log_uuid": {
+ "type": "string"
+ },
+ "output_name": {
+ "type": "string"
+ },
+ "output_ttl": {
+ "type": "integer"
+ },
+ "output_storage_classes": {
+ "type": "Array"
+ },
+ "output_properties": {
+ "type": "Hash"
+ },
+ "cumulative_cost": {
+ "type": "float"
+ }
+ }
+ },
+ "GroupList": {
+ "id": "GroupList",
+ "description": "Group list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#groupList.",
+ "default": "arvados#groupList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Groups.",
+ "items": {
+ "$ref": "Group"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Groups."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Groups."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Group": {
+ "id": "Group",
+ "description": "Group",
+ "type": "object",
+ "uuidPrefix": "j7d0g",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "group_class": {
+ "type": "string"
+ },
+ "trash_at": {
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "type": "boolean"
+ },
+ "delete_at": {
+ "type": "datetime"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "frozen_by_uuid": {
+ "type": "string"
+ }
+ }
+ },
+ "HumanList": {
+ "id": "HumanList",
+ "description": "Human list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#humanList.",
+ "default": "arvados#humanList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Humans.",
+ "items": {
+ "$ref": "Human"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Humans."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Humans."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Human": {
+ "id": "Human",
+ "description": "Human",
+ "type": "object",
+ "uuidPrefix": "7a9it",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "created_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "JobList": {
+ "id": "JobList",
+ "description": "Job list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#jobList.",
+ "default": "arvados#jobList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Jobs.",
+ "items": {
+ "$ref": "Job"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Jobs."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Jobs."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Job": {
+ "id": "Job",
+ "description": "Job",
+ "type": "object",
+ "uuidPrefix": "8i9sb",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "submit_id": {
+ "type": "string"
+ },
+ "script": {
+ "type": "string"
+ },
+ "script_version": {
+ "type": "string"
+ },
+ "script_parameters": {
+ "type": "Hash"
+ },
+ "cancelled_by_client_uuid": {
+ "type": "string"
+ },
+ "cancelled_by_user_uuid": {
+ "type": "string"
+ },
+ "cancelled_at": {
+ "type": "datetime"
+ },
+ "started_at": {
+ "type": "datetime"
+ },
+ "finished_at": {
+ "type": "datetime"
+ },
+ "running": {
+ "type": "boolean"
+ },
+ "success": {
+ "type": "boolean"
+ },
+ "output": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "is_locked_by_uuid": {
+ "type": "string"
+ },
+ "log": {
+ "type": "string"
+ },
+ "tasks_summary": {
+ "type": "Hash"
+ },
+ "runtime_constraints": {
+ "type": "Hash"
+ },
+ "nondeterministic": {
+ "type": "boolean"
+ },
+ "repository": {
+ "type": "string"
+ },
+ "supplied_script_version": {
+ "type": "string"
+ },
+ "docker_image_locator": {
+ "type": "string"
+ },
+ "priority": {
+ "type": "integer"
+ },
+ "description": {
+ "type": "string"
+ },
+ "state": {
+ "type": "string"
+ },
+ "arvados_sdk_version": {
+ "type": "string"
+ },
+ "components": {
+ "type": "Hash"
+ }
+ }
+ },
+ "JobTaskList": {
+ "id": "JobTaskList",
+ "description": "JobTask list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#jobTaskList.",
+ "default": "arvados#jobTaskList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of JobTasks.",
+ "items": {
+ "$ref": "JobTask"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of JobTasks."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of JobTasks."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "JobTask": {
+ "id": "JobTask",
+ "description": "JobTask",
+ "type": "object",
+ "uuidPrefix": "ot0gb",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "job_uuid": {
+ "type": "string"
+ },
+ "sequence": {
+ "type": "integer"
+ },
+ "parameters": {
+ "type": "Hash"
+ },
+ "output": {
+ "type": "text"
+ },
+ "progress": {
+ "type": "float"
+ },
+ "success": {
+ "type": "boolean"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "created_by_job_task_uuid": {
+ "type": "string"
+ },
+ "qsequence": {
+ "type": "integer"
+ },
+ "started_at": {
+ "type": "datetime"
+ },
+ "finished_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "KeepDiskList": {
+ "id": "KeepDiskList",
+ "description": "KeepDisk list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#keepDiskList.",
+ "default": "arvados#keepDiskList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of KeepDisks.",
+ "items": {
+ "$ref": "KeepDisk"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of KeepDisks."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of KeepDisks."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "KeepDisk": {
+ "id": "KeepDisk",
+ "description": "KeepDisk",
+ "type": "object",
+ "uuidPrefix": "penuu",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "node_uuid": {
+ "type": "string"
+ },
+ "filesystem_uuid": {
+ "type": "string"
+ },
+ "bytes_total": {
+ "type": "integer"
+ },
+ "bytes_free": {
+ "type": "integer"
+ },
+ "is_readable": {
+ "type": "boolean"
+ },
+ "is_writable": {
+ "type": "boolean"
+ },
+ "last_read_at": {
+ "type": "datetime"
+ },
+ "last_write_at": {
+ "type": "datetime"
+ },
+ "last_ping_at": {
+ "type": "datetime"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "keep_service_uuid": {
+ "type": "string"
+ }
+ }
+ },
+ "KeepServiceList": {
+ "id": "KeepServiceList",
+ "description": "KeepService list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#keepServiceList.",
+ "default": "arvados#keepServiceList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of KeepServices.",
+ "items": {
+ "$ref": "KeepService"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of KeepServices."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of KeepServices."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "KeepService": {
+ "id": "KeepService",
+ "description": "KeepService",
+ "type": "object",
+ "uuidPrefix": "bi6l4",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "service_host": {
+ "type": "string"
+ },
+ "service_port": {
+ "type": "integer"
+ },
+ "service_ssl_flag": {
+ "type": "boolean"
+ },
+ "service_type": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "read_only": {
+ "type": "boolean"
+ }
+ }
+ },
+ "LinkList": {
+ "id": "LinkList",
+ "description": "Link list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#linkList.",
+ "default": "arvados#linkList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Links.",
+ "items": {
+ "$ref": "Link"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Links."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Links."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Link": {
+ "id": "Link",
+ "description": "Link",
+ "type": "object",
+ "uuidPrefix": "o0j2j",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "tail_uuid": {
+ "type": "string"
+ },
+ "link_class": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "head_uuid": {
+ "type": "string"
+ },
+ "properties": {
+ "type": "Hash"
+ }
+ }
+ },
+ "LogList": {
+ "id": "LogList",
+ "description": "Log list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#logList.",
+ "default": "arvados#logList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Logs.",
+ "items": {
+ "$ref": "Log"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Logs."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Logs."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Log": {
+ "id": "Log",
+ "description": "Log",
+ "type": "object",
+ "uuidPrefix": "57u5n",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "id": {
+ "type": "integer"
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "object_uuid": {
+ "type": "string"
+ },
+ "event_at": {
+ "type": "datetime"
+ },
+ "event_type": {
+ "type": "string"
+ },
+ "summary": {
+ "type": "text"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "object_owner_uuid": {
+ "type": "string"
+ }
+ }
+ },
+ "NodeList": {
+ "id": "NodeList",
+ "description": "Node list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#nodeList.",
+ "default": "arvados#nodeList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Nodes.",
+ "items": {
+ "$ref": "Node"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Nodes."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Nodes."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Node": {
+ "id": "Node",
+ "description": "Node",
+ "type": "object",
+ "uuidPrefix": "7ekkf",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "slot_number": {
+ "type": "integer"
+ },
+ "hostname": {
+ "type": "string"
+ },
+ "domain": {
+ "type": "string"
+ },
+ "ip_address": {
+ "type": "string"
+ },
+ "last_ping_at": {
+ "type": "datetime"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "job_uuid": {
+ "type": "string"
+ }
+ }
+ },
+ "PipelineInstanceList": {
+ "id": "PipelineInstanceList",
+ "description": "PipelineInstance list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#pipelineInstanceList.",
+ "default": "arvados#pipelineInstanceList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of PipelineInstances.",
+ "items": {
+ "$ref": "PipelineInstance"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of PipelineInstances."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of PipelineInstances."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "PipelineInstance": {
+ "id": "PipelineInstance",
+ "description": "PipelineInstance",
+ "type": "object",
+ "uuidPrefix": "d1hrv",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "pipeline_template_uuid": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "components": {
+ "type": "Hash"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "state": {
+ "type": "string"
+ },
+ "components_summary": {
+ "type": "Hash"
+ },
+ "started_at": {
+ "type": "datetime"
+ },
+ "finished_at": {
+ "type": "datetime"
+ },
+ "description": {
+ "type": "string"
+ }
+ }
+ },
+ "PipelineTemplateList": {
+ "id": "PipelineTemplateList",
+ "description": "PipelineTemplate list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#pipelineTemplateList.",
+ "default": "arvados#pipelineTemplateList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of PipelineTemplates.",
+ "items": {
+ "$ref": "PipelineTemplate"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of PipelineTemplates."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of PipelineTemplates."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "PipelineTemplate": {
+ "id": "PipelineTemplate",
+ "description": "PipelineTemplate",
+ "type": "object",
+ "uuidPrefix": "p5p6p",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "components": {
+ "type": "Hash"
+ },
+ "description": {
+ "type": "string"
+ }
+ }
+ },
+ "RepositoryList": {
+ "id": "RepositoryList",
+ "description": "Repository list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#repositoryList.",
+ "default": "arvados#repositoryList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Repositories.",
+ "items": {
+ "$ref": "Repository"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Repositories."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Repositories."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Repository": {
+ "id": "Repository",
+ "description": "Repository",
+ "type": "object",
+ "uuidPrefix": "s0uqq",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "SpecimenList": {
+ "id": "SpecimenList",
+ "description": "Specimen list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#specimenList.",
+ "default": "arvados#specimenList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Specimens.",
+ "items": {
+ "$ref": "Specimen"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Specimens."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Specimens."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Specimen": {
+ "id": "Specimen",
+ "description": "Specimen",
+ "type": "object",
+ "uuidPrefix": "j58dm",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "material": {
+ "type": "string"
+ },
+ "properties": {
+ "type": "Hash"
+ }
+ }
+ },
+ "TraitList": {
+ "id": "TraitList",
+ "description": "Trait list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#traitList.",
+ "default": "arvados#traitList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Traits.",
+ "items": {
+ "$ref": "Trait"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Traits."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Traits."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Trait": {
+ "id": "Trait",
+ "description": "Trait",
+ "type": "object",
+ "uuidPrefix": "q1cn2",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "name": {
+ "type": "string"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "created_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "UserList": {
+ "id": "UserList",
+ "description": "User list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#userList.",
+ "default": "arvados#userList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Users.",
+ "items": {
+ "$ref": "User"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Users."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Users."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "User": {
+ "id": "User",
+ "description": "User",
+ "type": "object",
+ "uuidPrefix": "tpzed",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "email": {
+ "type": "string"
+ },
+ "first_name": {
+ "type": "string"
+ },
+ "last_name": {
+ "type": "string"
+ },
+ "identity_url": {
+ "type": "string"
+ },
+ "is_admin": {
+ "type": "boolean"
+ },
+ "prefs": {
+ "type": "Hash"
+ },
+ "is_active": {
+ "type": "boolean"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "UserAgreementList": {
+ "id": "UserAgreementList",
+ "description": "UserAgreement list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#userAgreementList.",
+ "default": "arvados#userAgreementList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of UserAgreements.",
+ "items": {
+ "$ref": "UserAgreement"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of UserAgreements."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of UserAgreements."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "UserAgreement": {
+ "id": "UserAgreement",
+ "description": "UserAgreement",
+ "type": "object",
+ "uuidPrefix": "gv0sa",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "portable_data_hash": {
+ "type": "string"
+ },
+ "replication_desired": {
+ "type": "integer"
+ },
+ "replication_confirmed_at": {
+ "type": "datetime"
+ },
+ "replication_confirmed": {
+ "type": "integer"
+ },
+ "manifest_text": {
+ "type": "text"
+ },
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "properties": {
+ "type": "Hash"
+ },
+ "delete_at": {
+ "type": "datetime"
+ },
+ "trash_at": {
+ "type": "datetime"
+ },
+ "is_trashed": {
+ "type": "boolean"
+ },
+ "storage_classes_desired": {
+ "type": "Array"
+ },
+ "storage_classes_confirmed": {
+ "type": "Array"
+ },
+ "storage_classes_confirmed_at": {
+ "type": "datetime"
+ },
+ "current_version_uuid": {
+ "type": "string"
+ },
+ "version": {
+ "type": "integer"
+ },
+ "preserve_version": {
+ "type": "boolean"
+ },
+ "file_count": {
+ "type": "integer"
+ },
+ "file_size_total": {
+ "type": "integer"
+ }
+ }
+ },
+ "VirtualMachineList": {
+ "id": "VirtualMachineList",
+ "description": "VirtualMachine list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#virtualMachineList.",
+ "default": "arvados#virtualMachineList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of VirtualMachines.",
+ "items": {
+ "$ref": "VirtualMachine"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of VirtualMachines."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of VirtualMachines."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "VirtualMachine": {
+ "id": "VirtualMachine",
+ "description": "VirtualMachine",
+ "type": "object",
+ "uuidPrefix": "2x53u",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "hostname": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ }
+ }
+ },
+ "WorkflowList": {
+ "id": "WorkflowList",
+ "description": "Workflow list",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Object type. Always arvados#workflowList.",
+ "default": "arvados#workflowList"
+ },
+ "etag": {
+ "type": "string",
+ "description": "List version."
+ },
+ "items": {
+ "type": "array",
+ "description": "The list of Workflows.",
+ "items": {
+ "$ref": "Workflow"
+ }
+ },
+ "next_link": {
+ "type": "string",
+ "description": "A link to the next page of Workflows."
+ },
+ "next_page_token": {
+ "type": "string",
+ "description": "The page token for the next page of Workflows."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "A link back to this list."
+ }
+ }
+ },
+ "Workflow": {
+ "id": "Workflow",
+ "description": "Workflow",
+ "type": "object",
+ "uuidPrefix": "7fd4e",
+ "properties": {
+ "uuid": {
+ "type": "string"
+ },
+ "etag": {
+ "type": "string",
+ "description": "Object version."
+ },
+ "owner_uuid": {
+ "type": "string"
+ },
+ "created_at": {
+ "type": "datetime"
+ },
+ "modified_at": {
+ "type": "datetime"
+ },
+ "modified_by_client_uuid": {
+ "type": "string"
+ },
+ "modified_by_user_uuid": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "text"
+ },
+ "definition": {
+ "type": "text"
+ }
+ }
+ }
+ },
+ "servicePath": "arvados/v1/",
+ "title": "Arvados API",
+ "version": "v1"
+}
\ No newline at end of file
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
index c8c7029807..83f658201c 100644
--- a/sdk/python/arvados/__init__.py
+++ b/sdk/python/arvados/__init__.py
@@ -1,53 +1,62 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Arvados Python SDK
-from __future__ import print_function
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import object
-import bz2
-import fcntl
-import hashlib
-import http.client
-import httplib2
-import json
-import logging
+This module provides the entire Python SDK for Arvados. The most useful modules
+include:
+
+* arvados.api - After you `import arvados`, you can call `arvados.api` as a
+ shortcut to the client constructor function `arvados.api.api`.
+
+* arvados.collection - The `arvados.collection.Collection` class provides a
+ high-level interface to read and write collections. It coordinates sending
+ data to and from Keep, and synchronizing updates with the collection object.
+
+* arvados.util - Utility functions to use mostly in conjunction with the API
+ client object and the results it returns.
+
+Other submodules provide lower-level functionality.
+"""
+
+import logging as stdliblog
import os
-import pprint
-import re
-import string
import sys
-import time
import types
-import zlib
-if sys.version_info >= (3, 0):
- from collections import UserDict
-else:
- from UserDict import UserDict
+from collections import UserDict
-from .api import api, api_from_config, http_cache
+from . import api, errors, util
+from .api import api_from_config, http_cache
from .collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
from arvados.keep import *
from arvados.stream import *
from .arvfile import StreamFileReader
+from .logging import log_format, log_date_format, log_handler
from .retry import RetryLoop
-import arvados.errors as errors
-import arvados.util as util
+
+# Previous versions of the PySDK used to say `from .api import api`. This
+# made it convenient to call the API client constructor, but difficult to
+# access the rest of the `arvados.api` module. The magic below fixes that
+# bug while retaining backwards compatibility: `arvados.api` is now the
+# module and you can import it normally, but we make that module callable so
+# all the existing code that says `arvados.api('v1', ...)` still works.
+class _CallableAPIModule(api.__class__):
+ __call__ = staticmethod(api.api)
+api.__class__ = _CallableAPIModule
+
+# Override logging module pulled in via `from ... import *`
+# so users can `import arvados.logging`.
+logging = sys.modules['arvados.logging']
# Set up Arvados logging based on the user's configuration.
# All Arvados code should log under the arvados hierarchy.
-log_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'
-log_date_format = '%Y-%m-%d %H:%M:%S'
-log_handler = logging.StreamHandler()
-log_handler.setFormatter(logging.Formatter(log_format, log_date_format))
-logger = logging.getLogger('arvados')
+logger = stdliblog.getLogger('arvados')
logger.addHandler(log_handler)
-logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
- else logging.WARNING)
+logger.setLevel(stdliblog.DEBUG if config.get('ARVADOS_DEBUG')
+ else stdliblog.WARNING)
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def task_set_output(self, s, num_retries=5):
for tries_left in RetryLoop(num_retries=num_retries, backoff_start=0):
try:
@@ -65,6 +74,7 @@ def task_set_output(self, s, num_retries=5):
raise
_current_task = None
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def current_task(num_retries=5):
global _current_task
if _current_task:
@@ -85,6 +95,7 @@ def current_task(num_retries=5):
raise
_current_job = None
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def current_job(num_retries=5):
global _current_job
if _current_job:
@@ -103,21 +114,26 @@ def current_job(num_retries=5):
else:
raise
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def getjobparam(*args):
return current_job()['script_parameters'].get(*args)
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def get_job_param_mount(*args):
return os.path.join(os.environ['TASK_KEEPMOUNT'], current_job()['script_parameters'].get(*args))
+@util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def get_task_param_mount(*args):
return os.path.join(os.environ['TASK_KEEPMOUNT'], current_task()['parameters'].get(*args))
class JobTask(object):
+ @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def __init__(self, parameters=dict(), runtime_constraints=dict()):
print("init jobtask %s %s" % (parameters, runtime_constraints))
class job_setup(object):
@staticmethod
+ @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=False, api_client=None):
if if_sequence != current_task()['sequence']:
return
@@ -150,6 +166,7 @@ class job_setup(object):
exit(0)
@staticmethod
+ @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def one_task_per_input_stream(if_sequence=0, and_end_task=True):
if if_sequence != current_task()['sequence']:
return
diff --git a/sdk/python/arvados/_normalize_stream.py b/sdk/python/arvados/_normalize_stream.py
index 485c757e7f..c72b82be1c 100644
--- a/sdk/python/arvados/_normalize_stream.py
+++ b/sdk/python/arvados/_normalize_stream.py
@@ -8,9 +8,7 @@ from . import config
import re
def escape(path):
- path = re.sub('\\\\', lambda m: '\\134', path)
- path = re.sub('[:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
- return path
+ return re.sub(r'[\\:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
def normalize_stream(stream_name, stream):
"""Take manifest stream and return a list of tokens in normalized format.
diff --git a/sdk/python/arvados/_pycurlhelper.py b/sdk/python/arvados/_pycurlhelper.py
new file mode 100644
index 0000000000..749548a7fc
--- /dev/null
+++ b/sdk/python/arvados/_pycurlhelper.py
@@ -0,0 +1,89 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import collections
+import logging
+import socket
+import pycurl
+import math
+
+_logger = logging.getLogger('arvados._pycurlhelper')
+
+class PyCurlHelper:
+ # Default Keep server connection timeout: 2 seconds
+ # Default Keep server read timeout: 256 seconds
+ # Default Keep server bandwidth minimum: 32768 bytes per second
+ # Default Keep proxy connection timeout: 20 seconds
+ # Default Keep proxy read timeout: 256 seconds
+ # Default Keep proxy bandwidth minimum: 32768 bytes per second
+ DEFAULT_TIMEOUT = (2, 256, 32768)
+ DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)
+
+ def __init__(self, title_case_headers=False):
+ self._socket = None
+ self.title_case_headers = title_case_headers
+
+ def _socket_open(self, *args, **kwargs):
+ if len(args) + len(kwargs) == 2:
+ return self._socket_open_pycurl_7_21_5(*args, **kwargs)
+ else:
+ return self._socket_open_pycurl_7_19_3(*args, **kwargs)
+
+ def _socket_open_pycurl_7_19_3(self, family, socktype, protocol, address=None):
+ return self._socket_open_pycurl_7_21_5(
+ purpose=None,
+ address=collections.namedtuple(
+ 'Address', ['family', 'socktype', 'protocol', 'addr'],
+ )(family, socktype, protocol, address))
+
+ def _socket_open_pycurl_7_21_5(self, purpose, address):
+ """Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
+ s = socket.socket(address.family, address.socktype, address.protocol)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ # Will throw invalid protocol error on mac. This test prevents that.
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
+ s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
+ self._socket = s
+ return s
+
+ def _setcurltimeouts(self, curl, timeouts, ignore_bandwidth=False):
+ if not timeouts:
+ return
+ elif isinstance(timeouts, tuple):
+ if len(timeouts) == 2:
+ conn_t, xfer_t = timeouts
+ bandwidth_bps = self.DEFAULT_TIMEOUT[2]
+ else:
+ conn_t, xfer_t, bandwidth_bps = timeouts
+ else:
+ conn_t, xfer_t = (timeouts, timeouts)
+ bandwidth_bps = self.DEFAULT_TIMEOUT[2]
+ curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))
+ if not ignore_bandwidth:
+ curl.setopt(pycurl.LOW_SPEED_TIME, int(math.ceil(xfer_t)))
+ curl.setopt(pycurl.LOW_SPEED_LIMIT, int(math.ceil(bandwidth_bps)))
+
+ def _headerfunction(self, header_line):
+ if isinstance(header_line, bytes):
+ header_line = header_line.decode('iso-8859-1')
+ if ':' in header_line:
+ name, value = header_line.split(':', 1)
+ if self.title_case_headers:
+ name = name.strip().title()
+ else:
+ name = name.strip().lower()
+ value = value.strip()
+ elif self._headers:
+ name = self._lastheadername
+ value = self._headers[name] + ' ' + header_line.strip()
+ elif header_line.startswith('HTTP/'):
+ name = 'x-status-line'
+ value = header_line
+ else:
+ _logger.error("Unexpected header line: %s", header_line)
+ return
+ self._lastheadername = name
+ self._headers[name] = value
+ # Returning None implies all bytes were written
diff --git a/sdk/python/arvados/api.py b/sdk/python/arvados/api.py
index db1d0f4e12..8a17e42fcb 100644
--- a/sdk/python/arvados/api.py
+++ b/sdk/python/arvados/api.py
@@ -1,61 +1,82 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Arvados API client
+
+The code in this module builds Arvados API client objects you can use to submit
+Arvados API requests. This includes extending the underlying HTTP client with
+niceties such as caching, X-Request-Id header for tracking, and more. The main
+client constructors are `api` and `api_from_config`.
+"""
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import range
import collections
-import http.client
import httplib2
import json
import logging
import os
+import pathlib
import re
import socket
import ssl
import sys
+import threading
import time
import types
+from typing import (
+ Any,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+)
+
import apiclient
+import apiclient.http
from apiclient import discovery as apiclient_discovery
from apiclient import errors as apiclient_errors
from . import config
from . import errors
+from . import retry
from . import util
from . import cache
+from .logging import GoogleHTTPClientFilter, log_handler
_logger = logging.getLogger('arvados.api')
+_googleapiclient_log_lock = threading.Lock()
MAX_IDLE_CONNECTION_DURATION = 30
-RETRY_DELAY_INITIAL = 2
-RETRY_DELAY_BACKOFF = 2
-RETRY_COUNT = 2
+"""
+Number of seconds that API client HTTP connections should be allowed to idle
+in keepalive state before they are forced closed. Client code can adjust this
+constant, and it will be used for all Arvados API clients constructed after
+that point.
+"""
+
+# An unused HTTP 5xx status code to request a retry internally.
+# See _intercept_http_request. This should not be user-visible.
+_RETRY_4XX_STATUS = 545
if sys.version_info >= (3,):
httplib2.SSLHandshakeError = None
-class OrderedJsonModel(apiclient.model.JsonModel):
- """Model class for JSON that preserves the contents' order.
-
- API clients that care about preserving the order of fields in API
- server responses can use this model to do so, like this::
-
- from arvados.api import OrderedJsonModel
- client = arvados.api('v1', ..., model=OrderedJsonModel())
- """
-
- def deserialize(self, content):
- # This is a very slightly modified version of the parent class'
- # implementation. Copyright (c) 2010 Google.
- content = content.decode('utf-8')
- body = json.loads(content, object_pairs_hook=collections.OrderedDict)
- if self._data_wrapper and isinstance(body, dict) and 'data' in body:
- body = body['data']
- return body
-
+_orig_retry_request = apiclient.http._retry_request
+def _retry_request(http, num_retries, *args, **kwargs):
+ try:
+ num_retries = max(num_retries, http.num_retries)
+ except AttributeError:
+ # `http` client object does not have a `num_retries` attribute.
+ # It apparently hasn't gone through _patch_http_request, possibly
+ # because this isn't an Arvados API client. Pass through to
+ # avoid interfering with other Google API clients.
+ return _orig_retry_request(http, num_retries, *args, **kwargs)
+ response, body = _orig_retry_request(http, num_retries, *args, **kwargs)
+ # If _intercept_http_request ran out of retries for a 4xx response,
+ # restore the original status code.
+ if response.status == _RETRY_4XX_STATUS:
+ response.status = int(response['status'])
+ return (response, body)
+apiclient.http._retry_request = _retry_request
def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
if not headers.get('X-Request-Id'):
@@ -66,17 +87,9 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
self.max_request_size < len(kwargs['body'])):
raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
- if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
- headers['X-External-Client'] = '1'
-
headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
- retryable = method in [
- 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT']
- retry_count = self._retry_count if retryable else 0
-
- if (not retryable and
- time.time() - self._last_request_time > self._max_keepalive_idle):
+ if (time.time() - self._last_request_time) > self._max_keepalive_idle:
# High probability of failure due to connection atrophy. Make
# sure this request [re]opens a new connection by closing and
# forgetting all cached connections first.
@@ -84,32 +97,17 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
conn.close()
self.connections.clear()
- delay = self._retry_delay_initial
- for _ in range(retry_count):
- self._last_request_time = time.time()
- try:
- return self.orig_http_request(uri, method, headers=headers, **kwargs)
- except http.client.HTTPException:
- _logger.debug("[%s] Retrying API request in %d s after HTTP error",
- headers['X-Request-Id'], delay, exc_info=True)
- except ssl.SSLCertVerificationError as e:
- raise ssl.SSLCertVerificationError(e.args[0], "Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e)) from None
- except socket.error:
- # This is the one case where httplib2 doesn't close the
- # underlying connection first. Close all open
- # connections, expecting this object only has the one
- # connection to the API server. This is safe because
- # httplib2 reopens connections when needed.
- _logger.debug("[%s] Retrying API request in %d s after socket error",
- headers['X-Request-Id'], delay, exc_info=True)
- for conn in self.connections.values():
- conn.close()
-
- time.sleep(delay)
- delay = delay * self._retry_delay_backoff
-
self._last_request_time = time.time()
- return self.orig_http_request(uri, method, headers=headers, **kwargs)
+ try:
+ response, body = self.orig_http_request(uri, method, headers=headers, **kwargs)
+ except ssl.SSLCertVerificationError as e:
+ raise ssl.SSLCertVerificationError(e.args[0], "Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e)) from None
+ # googleapiclient only retries 403, 429, and 5xx status codes.
+ # If we got another 4xx status that we want to retry, convert it into
+ # 5xx so googleapiclient handles it the way we want.
+ if response.status in retry._HTTP_CAN_RETRY and response.status < 500:
+ response.status = _RETRY_4XX_STATUS
+ return (response, body)
except Exception as e:
# Prepend "[request_id] " to the error message, which we
# assume is the first string argument passed to the exception
@@ -120,16 +118,14 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
raise type(e)(*e.args)
raise
-def _patch_http_request(http, api_token):
+def _patch_http_request(http, api_token, num_retries):
http.arvados_api_token = api_token
http.max_request_size = 0
+ http.num_retries = num_retries
http.orig_http_request = http.request
http.request = types.MethodType(_intercept_http_request, http)
http._last_request_time = 0
http._max_keepalive_idle = MAX_IDLE_CONNECTION_DURATION
- http._retry_delay_initial = RETRY_DELAY_INITIAL
- http._retry_delay_backoff = RETRY_DELAY_BACKOFF
- http._retry_count = RETRY_COUNT
http._request_id = util.new_request_id
return http
@@ -159,141 +155,390 @@ def _new_http_error(cls, *args, **kwargs):
errors.ApiError, *args, **kwargs)
apiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)
-def http_cache(data_type):
- homedir = os.environ.get('HOME')
- if not homedir or len(homedir) == 0:
+def http_cache(data_type: str) -> cache.SafeHTTPCache:
+ """Set up an HTTP file cache
+
+ This function constructs and returns an `arvados.cache.SafeHTTPCache`
+ backed by the filesystem under `~/.cache/arvados/`, or `None` if the
+ directory cannot be set up. The return value can be passed to
+ `httplib2.Http` as the `cache` argument.
+
+ Arguments:
+
+ * data_type: str --- The name of the subdirectory under `~/.cache/arvados`
+ where data is cached.
+ """
+ try:
+ homedir = pathlib.Path.home()
+ except RuntimeError:
return None
- path = homedir + '/.cache/arvados/' + data_type
+ path = pathlib.Path(homedir, '.cache', 'arvados', data_type)
try:
- util.mkdir_dash_p(path)
+ path.mkdir(parents=True, exist_ok=True)
except OSError:
return None
- return cache.SafeHTTPCache(path, max_age=60*60*24*2)
+ return cache.SafeHTTPCache(str(path), max_age=60*60*24*2)
-def api(version=None, cache=True, host=None, token=None, insecure=False,
- request_id=None, timeout=5*60, **kwargs):
- """Return an apiclient Resources object for an Arvados instance.
+def api_client(
+ version: str,
+ discoveryServiceUrl: str,
+ token: str,
+ *,
+ cache: bool=True,
+ http: Optional[httplib2.Http]=None,
+ insecure: bool=False,
+ num_retries: int=10,
+ request_id: Optional[str]=None,
+ timeout: int=5*60,
+ **kwargs: Any,
+) -> apiclient_discovery.Resource:
+ """Build an Arvados API client
- :version:
- A string naming the version of the Arvados API to use (for
- example, 'v1').
+ This function returns a `googleapiclient.discovery.Resource` object
+ constructed from the given arguments. This is a relatively low-level
+ interface that requires all the necessary inputs as arguments. Most
+ users will prefer to use `api` which can accept more flexible inputs.
- :cache:
- Use a cache (~/.cache/arvados/discovery) for the discovery
- document.
+ Arguments:
- :host:
- The Arvados API server host (and optional :port) to connect to.
+ * version: str --- A string naming the version of the Arvados API to use.
- :token:
- The authentication token to send with each API call.
+ * discoveryServiceUrl: str --- The URL used to discover APIs passed
+ directly to `googleapiclient.discovery.build`.
- :insecure:
- If True, ignore SSL certificate validation errors.
+ * token: str --- The authentication token to send with each API call.
- :timeout:
- A timeout value for http requests.
+ Keyword-only arguments:
- :request_id:
- Default X-Request-Id header value for outgoing requests that
- don't already provide one. If None or omitted, generate a random
- ID. When retrying failed requests, the same ID is used on all
- attempts.
+ * cache: bool --- If true, loads the API discovery document from, or
+ saves it to, a cache on disk (located at
+ `~/.cache/arvados/discovery`).
- Additional keyword arguments will be passed directly to
- `apiclient_discovery.build` if a new Resource object is created.
- If the `discoveryServiceUrl` or `http` keyword arguments are
- missing, this function will set default values for them, based on
- the current Arvados configuration settings.
+ * http: httplib2.Http | None --- The HTTP client object the API client
+ object will use to make requests. If not provided, this function will
+ build its own to use. Either way, the object will be patched as part
+ of the build process.
- """
+ * insecure: bool --- If true, ignore SSL certificate validation
+ errors. Default `False`.
- if not version:
- version = 'v1'
- _logger.info("Using default API version. " +
- "Call arvados.api('%s') instead." %
- version)
- if 'discoveryServiceUrl' in kwargs:
- if host:
- raise ValueError("both discoveryServiceUrl and host provided")
- # Here we can't use a token from environment, config file,
- # etc. Those probably have nothing to do with the host
- # provided by the caller.
- if not token:
- raise ValueError("discoveryServiceUrl provided, but token missing")
- elif host and token:
- pass
- elif not host and not token:
- return api_from_config(
- version=version, cache=cache, timeout=timeout,
- request_id=request_id, **kwargs)
- else:
- # Caller provided one but not the other
- if not host:
- raise ValueError("token argument provided, but host missing.")
- else:
- raise ValueError("host argument provided, but token missing.")
-
- if host:
- # Caller wants us to build the discoveryServiceUrl
- kwargs['discoveryServiceUrl'] = (
- 'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,))
-
- if 'http' not in kwargs:
- http_kwargs = {'ca_certs': util.ca_certs_path()}
- if cache:
- http_kwargs['cache'] = http_cache('discovery')
- if insecure:
- http_kwargs['disable_ssl_certificate_validation'] = True
- kwargs['http'] = httplib2.Http(**http_kwargs)
+ * num_retries: int --- The number of times to retry each API request if
+ it encounters a temporary failure. Default 10.
- if kwargs['http'].timeout is None:
- kwargs['http'].timeout = timeout
+ * request_id: str | None --- Default `X-Request-Id` header value for
+ outgoing requests that don't already provide one. If `None` or
+ omitted, generate a random ID. When retrying failed requests, the same
+ ID is used on all attempts.
- kwargs['http'] = _patch_http_request(kwargs['http'], token)
+ * timeout: int --- A timeout value for HTTP requests in seconds. Default
+ 300 (5 minutes).
- svc = apiclient_discovery.build('arvados', version, cache_discovery=False, **kwargs)
+ Additional keyword arguments will be passed directly to
+ `googleapiclient.discovery.build`.
+ """
+ if http is None:
+ http = httplib2.Http(
+ ca_certs=util.ca_certs_path(),
+ cache=http_cache('discovery') if cache else None,
+ disable_ssl_certificate_validation=bool(insecure),
+ )
+ if http.timeout is None:
+ http.timeout = timeout
+ http = _patch_http_request(http, token, num_retries)
+
+ # The first time a client is instantiated, temporarily route
+ # googleapiclient.http retry logs if they're not already. These are
+ # important because temporary problems fetching the discovery document
+ # can cause clients to appear to hang early. This can be removed after
+ # we have a more general story for handling googleapiclient logs (#20521).
+ client_logger = logging.getLogger('googleapiclient.http')
+ # "first time a client is instantiated" = thread that acquires this lock
+ # It is never released.
+ # googleapiclient sets up its own NullHandler so we detect if logging is
+ # configured by looking for a real handler anywhere in the hierarchy.
+ client_logger_unconfigured = _googleapiclient_log_lock.acquire(blocking=False) and all(
+ isinstance(handler, logging.NullHandler)
+ for logger_name in ['', 'googleapiclient', 'googleapiclient.http']
+ for handler in logging.getLogger(logger_name).handlers
+ )
+ if client_logger_unconfigured:
+ client_level = client_logger.level
+ client_filter = GoogleHTTPClientFilter()
+ client_logger.addFilter(client_filter)
+ client_logger.addHandler(log_handler)
+ if logging.NOTSET < client_level < client_filter.retry_levelno:
+ client_logger.setLevel(client_level)
+ else:
+ client_logger.setLevel(client_filter.retry_levelno)
+ try:
+ svc = apiclient_discovery.build(
+ 'arvados', version,
+ cache_discovery=False,
+ discoveryServiceUrl=discoveryServiceUrl,
+ http=http,
+ num_retries=num_retries,
+ **kwargs,
+ )
+ finally:
+ if client_logger_unconfigured:
+ client_logger.removeHandler(log_handler)
+ client_logger.removeFilter(client_filter)
+ client_logger.setLevel(client_level)
svc.api_token = token
svc.insecure = insecure
svc.request_id = request_id
svc.config = lambda: util.get_config_once(svc)
svc.vocabulary = lambda: util.get_vocabulary_once(svc)
svc.close_connections = types.MethodType(_close_connections, svc)
- kwargs['http'].max_request_size = svc._rootDesc.get('maxRequestSize', 0)
- kwargs['http'].cache = None
- kwargs['http']._request_id = lambda: svc.request_id or util.new_request_id()
+ http.max_request_size = svc._rootDesc.get('maxRequestSize', 0)
+ http.cache = None
+ http._request_id = lambda: svc.request_id or util.new_request_id()
return svc
-def api_from_config(version=None, apiconfig=None, **kwargs):
- """Return an apiclient Resources object enabling access to an Arvados server
- instance.
+def normalize_api_kwargs(
+ version: Optional[str]=None,
+ discoveryServiceUrl: Optional[str]=None,
+ host: Optional[str]=None,
+ token: Optional[str]=None,
+ **kwargs: Any,
+) -> Dict[str, Any]:
+ """Validate kwargs from `api` and build kwargs for `api_client`
+
+ This method takes high-level keyword arguments passed to the `api`
+ constructor and normalizes them into a new dictionary that can be passed
+ as keyword arguments to `api_client`. It raises `ValueError` if required
+ arguments are missing or conflict.
+
+ Arguments:
- :version:
- A string naming the version of the Arvados REST API to use (for
- example, 'v1').
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ 'v1'.
- :apiconfig:
- If provided, this should be a dict-like object (must support the get()
- method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
- optionally ARVADOS_API_HOST_INSECURE. If not provided, use
- arvados.config (which gets these parameters from the environment by
- default.)
+ * discoveryServiceUrl: str | None --- The URL used to discover APIs
+ passed directly to `googleapiclient.discovery.build`. It is an error
+ to pass both `discoveryServiceUrl` and `host`.
- Other keyword arguments such as `cache` will be passed along `api()`
+ * host: str | None --- The hostname and optional port number of the
+ Arvados API server. Used to build `discoveryServiceUrl`. It is an
+ error to pass both `discoveryServiceUrl` and `host`.
+ * token: str --- The authentication token to send with each API call.
+
+ Additional keyword arguments will be included in the return value.
+ """
+ if discoveryServiceUrl and host:
+ raise ValueError("both discoveryServiceUrl and host provided")
+ elif discoveryServiceUrl:
+ url_src = "discoveryServiceUrl"
+ elif host:
+ url_src = "host argument"
+ discoveryServiceUrl = 'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,)
+ elif token:
+ # This specific error message gets priority for backwards compatibility.
+ raise ValueError("token argument provided, but host missing.")
+ else:
+ raise ValueError("neither discoveryServiceUrl nor host provided")
+ if not token:
+ raise ValueError("%s provided, but token missing" % (url_src,))
+ if not version:
+ version = 'v1'
+ _logger.info(
+ "Using default API version. Call arvados.api(%r) instead.",
+ version,
+ )
+ return {
+ 'discoveryServiceUrl': discoveryServiceUrl,
+ 'token': token,
+ 'version': version,
+ **kwargs,
+ }
+
+def api_kwargs_from_config(
+ version: Optional[str]=None,
+ apiconfig: Optional[Mapping[str, str]]=None,
+ **kwargs: Any
+) -> Dict[str, Any]:
+ """Build `api_client` keyword arguments from configuration
+
+ This function accepts a mapping with Arvados configuration settings like
+ `ARVADOS_API_HOST` and converts them into a mapping of keyword arguments
+ that can be passed to `api_client`. If `ARVADOS_API_HOST` or
+ `ARVADOS_API_TOKEN` are not configured, it raises `ValueError`.
+
+ Arguments:
+
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ 'v1'.
+
+ * apiconfig: Mapping[str, str] | None --- A mapping with entries for
+ `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
+ `ARVADOS_API_HOST_INSECURE`. If not provided, calls
+ `arvados.config.settings` to get these parameters from user
+ configuration.
+
+ Additional keyword arguments will be included in the return value.
"""
- # Load from user configuration or environment
if apiconfig is None:
apiconfig = config.settings()
+ missing = " and ".join(
+ key
+ for key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']
+ if key not in apiconfig
+ )
+ if missing:
+ raise ValueError(
+ "%s not set.\nPlease set in %s or export environment variable." %
+ (missing, config.default_config_file),
+ )
+ return normalize_api_kwargs(
+ version,
+ None,
+ apiconfig['ARVADOS_API_HOST'],
+ apiconfig['ARVADOS_API_TOKEN'],
+ insecure=config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig),
+ **kwargs,
+ )
+
+def api(
+ version: Optional[str]=None,
+ cache: bool=True,
+ host: Optional[str]=None,
+ token: Optional[str]=None,
+ insecure: bool=False,
+ request_id: Optional[str]=None,
+ timeout: int=5*60,
+ *,
+ discoveryServiceUrl: Optional[str]=None,
+ **kwargs: Any,
+) -> 'arvados.safeapi.ThreadSafeApiCache':
+ """Dynamically build an Arvados API client
+
+ This function provides a high-level "do what I mean" interface to build an
+ Arvados API client object. You can call it with no arguments to build a
+ client from user configuration; pass `host` and `token` arguments just
+ like you would write in user configuration; or pass additional arguments
+ for lower-level control over the client.
+
+ This function returns a `arvados.safeapi.ThreadSafeApiCache`, an
+ API-compatible wrapper around `googleapiclient.discovery.Resource`. If
+ you're handling concurrency yourself and/or your application is very
+ performance-sensitive, consider calling `api_client` directly.
+
+ Arguments:
+
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ 'v1'.
+
+ * host: str | None --- The hostname and optional port number of the
+ Arvados API server.
+
+ * token: str | None --- The authentication token to send with each API
+ call.
+
+ * discoveryServiceUrl: str | None --- The URL used to discover APIs
+ passed directly to `googleapiclient.discovery.build`.
+
+ If `host`, `token`, and `discoveryServiceUrl` are all omitted, `host` and
+ `token` will be loaded from the user's configuration. Otherwise, you must
+ pass `token` and one of `host` or `discoveryServiceUrl`. It is an error to
+ pass both `host` and `discoveryServiceUrl`.
+
+ Other arguments are passed directly to `api_client`. See that function's
+ docstring for more information about their meaning.
+ """
+ kwargs.update(
+ cache=cache,
+ insecure=insecure,
+ request_id=request_id,
+ timeout=timeout,
+ )
+ if discoveryServiceUrl or host or token:
+ kwargs.update(normalize_api_kwargs(version, discoveryServiceUrl, host, token))
+ else:
+ kwargs.update(api_kwargs_from_config(version))
+ version = kwargs.pop('version')
+ # We do the import here to avoid a circular import at the top level.
+ from .safeapi import ThreadSafeApiCache
+ return ThreadSafeApiCache({}, {}, kwargs, version)
+
+def api_from_config(
+ version: Optional[str]=None,
+ apiconfig: Optional[Mapping[str, str]]=None,
+ **kwargs: Any
+) -> 'arvados.safeapi.ThreadSafeApiCache':
+ """Build an Arvados API client from a configuration mapping
+
+ This function builds an Arvados API client from a mapping with user
+ configuration. It accepts that mapping as an argument, so you can use a
+ configuration that's different from what the user has set up.
+
+ This function returns a `arvados.safeapi.ThreadSafeApiCache`, an
+ API-compatible wrapper around `googleapiclient.discovery.Resource`. If
+ you're handling concurrency yourself and/or your application is very
+ performance-sensitive, consider calling `api_client` directly.
+
+ Arguments:
+
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ 'v1'.
+
+ * apiconfig: Mapping[str, str] | None --- A mapping with entries for
+ `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
+ `ARVADOS_API_HOST_INSECURE`. If not provided, calls
+ `arvados.config.settings` to get these parameters from user
+ configuration.
+
+ Other arguments are passed directly to `api_client`. See that function's
+ docstring for more information about their meaning.
+ """
+ return api(**api_kwargs_from_config(version, apiconfig, **kwargs))
+
+class OrderedJsonModel(apiclient.model.JsonModel):
+ """Model class for JSON that preserves the contents' order
+
+ .. WARNING:: Deprecated
+ This model is redundant now that Python dictionaries preserve insertion
+ ordering. Code that passes this model to API constructors can remove it.
+
+ In Python versions before 3.6, API clients that cared about preserving the
+ order of fields in API server responses could use this model to do so.
+ Typical usage looked like:
- errors = []
- for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
- if x not in apiconfig:
- errors.append(x)
- if errors:
- raise ValueError(" and ".join(errors)+" not set.\nPlease set in %s or export environment variable." % config.default_config_file)
- host = apiconfig.get('ARVADOS_API_HOST')
- token = apiconfig.get('ARVADOS_API_TOKEN')
- insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)
-
- return api(version=version, host=host, token=token, insecure=insecure, **kwargs)
+ from arvados.api import OrderedJsonModel
+ client = arvados.api('v1', ..., model=OrderedJsonModel())
+ """
+ @util._deprecated(preferred="the default model and rely on Python's built-in dictionary ordering")
+ def __init__(self, data_wrapper=False):
+ return super().__init__(data_wrapper)
+
+
+RETRY_DELAY_INITIAL = 0
+"""
+.. WARNING:: Deprecated
+ This constant was used by retry code in previous versions of the Arvados SDK.
+ Changing the value has no effect anymore.
+ Prefer passing `num_retries` to an API client constructor instead.
+ Refer to the constructor docstrings for details.
+"""
+
+RETRY_DELAY_BACKOFF = 0
+"""
+.. WARNING:: Deprecated
+ This constant was used by retry code in previous versions of the Arvados SDK.
+ Changing the value has no effect anymore.
+ Prefer passing `num_retries` to an API client constructor instead.
+ Refer to the constructor docstrings for details.
+"""
+
+RETRY_COUNT = 0
+"""
+.. WARNING:: Deprecated
+ This constant was used by retry code in previous versions of the Arvados SDK.
+ Changing the value has no effect anymore.
+ Prefer passing `num_retries` to an API client constructor instead.
+ Refer to the constructor docstrings for details.
+"""
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
index 2ce0e46b30..e0e972b5c1 100644
--- a/sdk/python/arvados/arvfile.py
+++ b/sdk/python/arvados/arvfile.py
@@ -100,7 +100,7 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
yield data
def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self.name)
+ return re.sub(r'\.(bz2|gz)$', '', self.name)
@_FileLikeObjectBase._before_close
def seek(self, pos, whence=os.SEEK_SET):
@@ -479,20 +479,20 @@ class _BlockManager(object):
"""
DEFAULT_PUT_THREADS = 2
- DEFAULT_GET_THREADS = 2
- def __init__(self, keep, copies=None, put_threads=None, num_retries=None, storage_classes_func=None, get_threads=None):
+ def __init__(self, keep,
+ copies=None,
+ put_threads=None,
+ num_retries=None,
+ storage_classes_func=None):
"""keep: KeepClient object to use"""
self._keep = keep
self._bufferblocks = collections.OrderedDict()
self._put_queue = None
self._put_threads = None
- self._prefetch_queue = None
- self._prefetch_threads = None
self.lock = threading.Lock()
- self.prefetch_enabled = True
+ self.prefetch_lookahead = self._keep.num_prefetch_threads
self.num_put_threads = put_threads or _BlockManager.DEFAULT_PUT_THREADS
- self.num_get_threads = get_threads or _BlockManager.DEFAULT_GET_THREADS
self.copies = copies
self.storage_classes = storage_classes_func or (lambda: [])
self._pending_write_size = 0
@@ -586,29 +586,6 @@ class _BlockManager(object):
thread.daemon = True
thread.start()
- def _block_prefetch_worker(self):
- """The background downloader thread."""
- while True:
- try:
- b = self._prefetch_queue.get()
- if b is None:
- return
- self._keep.get(b, prefetch=True)
- except Exception:
- _logger.exception("Exception doing block prefetch")
-
- @synchronized
- def start_get_threads(self):
- if self._prefetch_threads is None:
- self._prefetch_queue = queue.Queue()
- self._prefetch_threads = []
- for i in range(0, self.num_get_threads):
- thread = threading.Thread(target=self._block_prefetch_worker)
- self._prefetch_threads.append(thread)
- thread.daemon = True
- thread.start()
-
-
@synchronized
def stop_threads(self):
"""Shut down and wait for background upload and download threads to finish."""
@@ -621,14 +598,6 @@ class _BlockManager(object):
self._put_threads = None
self._put_queue = None
- if self._prefetch_threads is not None:
- for t in self._prefetch_threads:
- self._prefetch_queue.put(None)
- for t in self._prefetch_threads:
- t.join()
- self._prefetch_threads = None
- self._prefetch_queue = None
-
def __enter__(self):
return self
@@ -828,25 +797,20 @@ class _BlockManager(object):
owner.flush(sync=True)
self.delete_bufferblock(k)
+ self.stop_threads()
+
def block_prefetch(self, locator):
"""Initiate a background download of a block.
-
- This assumes that the underlying KeepClient implements a block cache,
- so repeated requests for the same block will not result in repeated
- downloads (unless the block is evicted from the cache.) This method
- does not block.
-
"""
- if not self.prefetch_enabled:
+ if not self.prefetch_lookahead:
return
with self.lock:
if locator in self._bufferblocks:
return
- self.start_get_threads()
- self._prefetch_queue.put(locator)
+ self._keep.block_prefetch(locator)
class ArvadosFile(object):
@@ -861,7 +825,7 @@ class ArvadosFile(object):
"""
__slots__ = ('parent', 'name', '_writers', '_committed',
- '_segments', 'lock', '_current_bblock', 'fuse_entry')
+ '_segments', 'lock', '_current_bblock', 'fuse_entry', '_read_counter')
def __init__(self, parent, name, stream=[], segments=[]):
"""
@@ -882,6 +846,7 @@ class ArvadosFile(object):
for s in segments:
self._add_segment(stream, s.locator, s.range_size)
self._current_bblock = None
+ self._read_counter = 0
def writable(self):
return self.parent.writable()
@@ -1096,7 +1061,25 @@ class ArvadosFile(object):
if size == 0 or offset >= self.size():
return b''
readsegs = locators_and_ranges(self._segments, offset, size)
- prefetch = locators_and_ranges(self._segments, offset + size, config.KEEP_BLOCK_SIZE * self.parent._my_block_manager().num_get_threads, limit=32)
+
+ prefetch = None
+ prefetch_lookahead = self.parent._my_block_manager().prefetch_lookahead
+ if prefetch_lookahead:
+ # Doing prefetch on every read() call is surprisingly expensive
+ # when we're trying to deliver data at 600+ MiBps and want
+ # the read() fast path to be as lightweight as possible.
+ #
+ # Only prefetching every 128 read operations
+ # dramatically reduces the overhead while still
+ # getting the benefit of prefetching (e.g. when
+ # reading 128 KiB at a time, it checks for prefetch
+ # every 16 MiB).
+ self._read_counter = (self._read_counter+1) % 128
+ if self._read_counter == 1:
+ prefetch = locators_and_ranges(self._segments,
+ offset + size,
+ config.KEEP_BLOCK_SIZE * prefetch_lookahead,
+ limit=(1+prefetch_lookahead))
locs = set()
data = []
@@ -1104,17 +1087,21 @@ class ArvadosFile(object):
block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=(bool(data) and not exact))
if block:
blockview = memoryview(block)
- data.append(blockview[lr.segment_offset:lr.segment_offset+lr.segment_size].tobytes())
+ data.append(blockview[lr.segment_offset:lr.segment_offset+lr.segment_size])
locs.add(lr.locator)
else:
break
- for lr in prefetch:
- if lr.locator not in locs:
- self.parent._my_block_manager().block_prefetch(lr.locator)
- locs.add(lr.locator)
+ if prefetch:
+ for lr in prefetch:
+ if lr.locator not in locs:
+ self.parent._my_block_manager().block_prefetch(lr.locator)
+ locs.add(lr.locator)
- return b''.join(data)
+ if len(data) == 1:
+ return data[0]
+ else:
+ return b''.join(data)
@must_be_writable
@synchronized
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
index 998481ab66..9e6bd06071 100644
--- a/sdk/python/arvados/collection.py
+++ b/sdk/python/arvados/collection.py
@@ -1,6 +1,16 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Tools to work with Arvados collections
+
+This module provides high-level interfaces to create, read, and update
+Arvados collections. Most users will want to instantiate `Collection`
+objects, and use methods like `Collection.open` and `Collection.mkdirs` to
+read and write data in the collection. Refer to the Arvados Python SDK
+cookbook for [an introduction to using the Collection class][cookbook].
+
+[cookbook]: https://doc.arvados.org/sdk/python/cookbook.html#working-with-collections
+"""
from __future__ import absolute_import
from future.utils import listitems, listvalues, viewkeys
@@ -35,30 +45,65 @@ import arvados.util
import arvados.events as events
from arvados.retry import retry_method
-_logger = logging.getLogger('arvados.collection')
-
-
-if sys.version_info >= (3, 0):
- TextIOWrapper = io.TextIOWrapper
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ IO,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Union,
+)
+
+if sys.version_info < (3, 8):
+ from typing_extensions import Literal
else:
- class TextIOWrapper(io.TextIOWrapper):
- """To maintain backward compatibility, cast str to unicode in
- write('foo').
+ from typing import Literal
- """
- def write(self, data):
- if isinstance(data, basestring):
- data = unicode(data)
- return super(TextIOWrapper, self).write(data)
+_logger = logging.getLogger('arvados.collection')
+ADD = "add"
+"""Argument value for `Collection` methods to represent an added item"""
+DEL = "del"
+"""Argument value for `Collection` methods to represent a removed item"""
+MOD = "mod"
+"""Argument value for `Collection` methods to represent a modified item"""
+TOK = "tok"
+"""Argument value for `Collection` methods to represent an item with token differences"""
+FILE = "file"
+"""`create_type` value for `Collection.find_or_create`"""
+COLLECTION = "collection"
+"""`create_type` value for `Collection.find_or_create`"""
+
+ChangeList = List[Union[
+ Tuple[Literal[ADD, DEL], str, 'Collection'],
+ Tuple[Literal[MOD, TOK], str, 'Collection', 'Collection'],
+]]
+ChangeType = Literal[ADD, DEL, MOD, TOK]
+CollectionItem = Union[ArvadosFile, 'Collection']
+ChangeCallback = Callable[[ChangeType, 'Collection', str, CollectionItem], object]
+CreateType = Literal[COLLECTION, FILE]
+Properties = Dict[str, Any]
+StorageClasses = List[str]
class CollectionBase(object):
- """Abstract base class for Collection classes."""
+ """Abstract base class for Collection classes
+
+ .. ATTENTION:: Internal
+ This class is meant to be used by other parts of the SDK. User code
+ should instantiate or subclass `Collection` or one of its subclasses
+ directly.
+ """
def __enter__(self):
+ """Enter a context block with this collection instance"""
return self
def __exit__(self, exc_type, exc_value, traceback):
+ """Exit a context block with this collection instance"""
pass
def _my_keep(self):
@@ -67,12 +112,13 @@ class CollectionBase(object):
num_retries=self.num_retries)
return self._keep_client
- def stripped_manifest(self):
- """Get the manifest with locator hints stripped.
+ def stripped_manifest(self) -> str:
+ """Create a copy of the collection manifest with only size hints
- Return the manifest for the current collection with all
- non-portable hints (i.e., permission signatures and other
- hints other than size hints) removed from the locators.
+ This method returns a string with the current collection's manifest
+ text with all non-portable locator hints like permission hints and
+ remote cluster hints removed. The only hints in the returned manifest
+ will be size hints.
"""
raw = self.manifest_text()
clean = []
@@ -111,707 +157,379 @@ class _WriterFile(_FileLikeObjectBase):
self.dest.flush_data()
-class CollectionWriter(CollectionBase):
- """Deprecated, use Collection instead."""
+class RichCollectionBase(CollectionBase):
+ """Base class for Collection classes
- def __init__(self, api_client=None, num_retries=0, replication=None):
- """Instantiate a CollectionWriter.
+ .. ATTENTION:: Internal
+ This class is meant to be used by other parts of the SDK. User code
+ should instantiate or subclass `Collection` or one of its subclasses
+ directly.
+ """
- CollectionWriter lets you build a new Arvados Collection from scratch.
- Write files to it. The CollectionWriter will upload data to Keep as
- appropriate, and provide you with the Collection manifest text when
- you're finished.
+ def __init__(self, parent=None):
+ self.parent = parent
+ self._committed = False
+ self._has_remote_blocks = False
+ self._callback = None
+ self._items = {}
- Arguments:
- * api_client: The API client to use to look up Collections. If not
- provided, CollectionReader will build one from available Arvados
- configuration.
- * num_retries: The default number of times to retry failed
- service requests. Default 0. You may change this value
- after instantiation, but note those changes may not
- propagate to related objects like the Keep client.
- * replication: The number of copies of each block to store.
- If this argument is None or not supplied, replication is
- the server-provided default if available, otherwise 2.
- """
- self._api_client = api_client
- self.num_retries = num_retries
- self.replication = (2 if replication is None else replication)
- self._keep_client = None
- self._data_buffer = []
- self._data_buffer_len = 0
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = '.'
- self._current_file_name = None
- self._current_file_pos = 0
- self._finished_streams = []
- self._close_file = None
- self._queued_file = None
- self._queued_dirents = deque()
- self._queued_trees = deque()
- self._last_open = None
+ def _my_api(self):
+ raise NotImplementedError()
- def __exit__(self, exc_type, exc_value, traceback):
- if exc_type is None:
- self.finish()
+ def _my_keep(self):
+ raise NotImplementedError()
- def do_queued_work(self):
- # The work queue consists of three pieces:
- # * _queued_file: The file object we're currently writing to the
- # Collection.
- # * _queued_dirents: Entries under the current directory
- # (_queued_trees[0]) that we want to write or recurse through.
- # This may contain files from subdirectories if
- # max_manifest_depth == 0 for this directory.
- # * _queued_trees: Directories that should be written as separate
- # streams to the Collection.
- # This function handles the smallest piece of work currently queued
- # (current file, then current directory, then next directory) until
- # no work remains. The _work_THING methods each do a unit of work on
- # THING. _queue_THING methods add a THING to the work queue.
- while True:
- if self._queued_file:
- self._work_file()
- elif self._queued_dirents:
- self._work_dirents()
- elif self._queued_trees:
- self._work_trees()
- else:
- break
+ def _my_block_manager(self):
+ raise NotImplementedError()
- def _work_file(self):
- while True:
- buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
- if not buf:
- break
- self.write(buf)
- self.finish_current_file()
- if self._close_file:
- self._queued_file.close()
- self._close_file = None
- self._queued_file = None
+ def writable(self) -> bool:
+ """Indicate whether this collection object can be modified
- def _work_dirents(self):
- path, stream_name, max_manifest_depth = self._queued_trees[0]
- if stream_name != self.current_stream_name():
- self.start_new_stream(stream_name)
- while self._queued_dirents:
- dirent = self._queued_dirents.popleft()
- target = os.path.join(path, dirent)
- if os.path.isdir(target):
- self._queue_tree(target,
- os.path.join(stream_name, dirent),
- max_manifest_depth - 1)
- else:
- self._queue_file(target, dirent)
- break
- if not self._queued_dirents:
- self._queued_trees.popleft()
+ This method returns `False` if this object is a `CollectionReader`,
+ else `True`.
+ """
+ raise NotImplementedError()
- def _work_trees(self):
- path, stream_name, max_manifest_depth = self._queued_trees[0]
- d = arvados.util.listdir_recursive(
- path, max_depth = (None if max_manifest_depth == 0 else 0))
- if d:
- self._queue_dirents(stream_name, d)
- else:
- self._queued_trees.popleft()
+ def root_collection(self) -> 'Collection':
+ """Get this collection's root collection object
- def _queue_file(self, source, filename=None):
- assert (self._queued_file is None), "tried to queue more than one file"
- if not hasattr(source, 'read'):
- source = open(source, 'rb')
- self._close_file = True
- else:
- self._close_file = False
- if filename is None:
- filename = os.path.basename(source.name)
- self.start_new_file(filename)
- self._queued_file = source
+ If you open a subcollection with `Collection.find`, calling this method
+ on that subcollection returns the source Collection object.
+ """
+ raise NotImplementedError()
- def _queue_dirents(self, stream_name, dirents):
- assert (not self._queued_dirents), "tried to queue more than one tree"
- self._queued_dirents = deque(sorted(dirents))
+ def stream_name(self) -> str:
+ """Get the name of the manifest stream represented by this collection
- def _queue_tree(self, path, stream_name, max_manifest_depth):
- self._queued_trees.append((path, stream_name, max_manifest_depth))
+ If you open a subcollection with `Collection.find`, calling this method
+ on that subcollection returns the name of the stream you opened.
+ """
+ raise NotImplementedError()
- def write_file(self, source, filename=None):
- self._queue_file(source, filename)
- self.do_queued_work()
+ @synchronized
+ def has_remote_blocks(self) -> bool:
+ """Indiciate whether the collection refers to remote data
- def write_directory_tree(self,
- path, stream_name='.', max_manifest_depth=-1):
- self._queue_tree(path, stream_name, max_manifest_depth)
- self.do_queued_work()
+ Returns `True` if the collection manifest includes any Keep locators
+ with a remote hint (`+R`), else `False`.
+ """
+ if self._has_remote_blocks:
+ return True
+ for item in self:
+ if self[item].has_remote_blocks():
+ return True
+ return False
- def write(self, newdata):
- if isinstance(newdata, bytes):
- pass
- elif isinstance(newdata, str):
- newdata = newdata.encode()
- elif hasattr(newdata, '__iter__'):
- for s in newdata:
- self.write(s)
- return
- self._data_buffer.append(newdata)
- self._data_buffer_len += len(newdata)
- self._current_stream_length += len(newdata)
- while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
- self.flush_data()
+ @synchronized
+ def set_has_remote_blocks(self, val: bool) -> None:
+ """Cache whether this collection refers to remote blocks
- def open(self, streampath, filename=None):
- """open(streampath[, filename]) -> file-like object
+ .. ATTENTION:: Internal
+ This method is only meant to be used by other Collection methods.
- Pass in the path of a file to write to the Collection, either as a
- single string or as two separate stream name and file name arguments.
- This method returns a file-like object you can write to add it to the
- Collection.
+ Set this collection's cached "has remote blocks" flag to the given
+ value.
+ """
+ self._has_remote_blocks = val
+ if self.parent:
+ self.parent.set_has_remote_blocks(val)
- You may only have one file object from the Collection open at a time,
- so be sure to close the object when you're done. Using the object in
- a with statement makes that easy::
+ @must_be_writable
+ @synchronized
+ def find_or_create(
+ self,
+ path: str,
+ create_type: CreateType,
+ ) -> CollectionItem:
+ """Get the item at the given path, creating it if necessary
+
+ If `path` refers to a stream in this collection, returns a
+ corresponding `Subcollection` object. If `path` refers to a file in
+ this collection, returns a corresponding
+ `arvados.arvfile.ArvadosFile` object. If `path` does not exist in
+ this collection, then this method creates a new object and returns
+ it, creating parent streams as needed. The type of object created is
+ determined by the value of `create_type`.
+
+ Arguments:
+
+ * path: str --- The path to find or create within this collection.
- with cwriter.open('./doc/page1.txt') as outfile:
- outfile.write(page1_data)
- with cwriter.open('./doc/page2.txt') as outfile:
- outfile.write(page2_data)
+ * create_type: Literal[COLLECTION, FILE] --- The type of object to
+ create at `path` if one does not exist. Passing `COLLECTION`
+ creates a stream and returns the corresponding
+ `Subcollection`. Passing `FILE` creates a new file and returns the
+ corresponding `arvados.arvfile.ArvadosFile`.
"""
- if filename is None:
- streampath, filename = split(streampath)
- if self._last_open and not self._last_open.closed:
- raise errors.AssertionError(
- u"can't open '{}' when '{}' is still open".format(
- filename, self._last_open.name))
- if streampath != self.current_stream_name():
- self.start_new_stream(streampath)
- self.set_current_file_name(filename)
- self._last_open = _WriterFile(self, filename)
- return self._last_open
+ pathcomponents = path.split("/", 1)
+ if pathcomponents[0]:
+ item = self._items.get(pathcomponents[0])
+ if len(pathcomponents) == 1:
+ if item is None:
+ # create new file
+ if create_type == COLLECTION:
+ item = Subcollection(self, pathcomponents[0])
+ else:
+ item = ArvadosFile(self, pathcomponents[0])
+ self._items[pathcomponents[0]] = item
+ self.set_committed(False)
+ self.notify(ADD, self, pathcomponents[0], item)
+ return item
+ else:
+ if item is None:
+ # create new collection
+ item = Subcollection(self, pathcomponents[0])
+ self._items[pathcomponents[0]] = item
+ self.set_committed(False)
+ self.notify(ADD, self, pathcomponents[0], item)
+ if isinstance(item, RichCollectionBase):
+ return item.find_or_create(pathcomponents[1], create_type)
+ else:
+ raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
+ else:
+ return self
- def flush_data(self):
- data_buffer = b''.join(self._data_buffer)
- if data_buffer:
- self._current_stream_locators.append(
- self._my_keep().put(
- data_buffer[0:config.KEEP_BLOCK_SIZE],
- copies=self.replication))
- self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
- self._data_buffer_len = len(self._data_buffer[0])
+ @synchronized
+ def find(self, path: str) -> CollectionItem:
+ """Get the item at the given path
- def start_new_file(self, newfilename=None):
- self.finish_current_file()
- self.set_current_file_name(newfilename)
+ If `path` refers to a stream in this collection, returns a
+ corresponding `Subcollection` object. If `path` refers to a file in
+ this collection, returns a corresponding
+        `arvados.arvfile.ArvadosFile` object. If `path` does not exist in
+        this collection, this method returns `None`; it raises an error if
- def set_current_file_name(self, newfilename):
- if re.search(r'[\t\n]', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain whitespace: %s" %
- newfilename)
- elif re.search(r'\x00', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain NUL characters: %s" %
- newfilename)
- self._current_file_name = newfilename
+ Arguments:
- def current_file_name(self):
- return self._current_file_name
+        * path: str --- The path to find within this collection.
+ """
+ if not path:
+ raise errors.ArgumentError("Parameter 'path' is empty.")
- def finish_current_file(self):
- if self._current_file_name is None:
- if self._current_file_pos == self._current_stream_length:
- return
- raise errors.AssertionError(
- "Cannot finish an unnamed file " +
- "(%d bytes at offset %d in '%s' stream)" %
- (self._current_stream_length - self._current_file_pos,
- self._current_file_pos,
- self._current_stream_name))
- self._current_stream_files.append([
- self._current_file_pos,
- self._current_stream_length - self._current_file_pos,
- self._current_file_name])
- self._current_file_pos = self._current_stream_length
- self._current_file_name = None
+ pathcomponents = path.split("/", 1)
+ if pathcomponents[0] == '':
+ raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
- def start_new_stream(self, newstreamname='.'):
- self.finish_current_stream()
- self.set_current_stream_name(newstreamname)
+ item = self._items.get(pathcomponents[0])
+ if item is None:
+ return None
+ elif len(pathcomponents) == 1:
+ return item
+ else:
+ if isinstance(item, RichCollectionBase):
+ if pathcomponents[1]:
+ return item.find(pathcomponents[1])
+ else:
+ return item
+ else:
+ raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
- def set_current_stream_name(self, newstreamname):
- if re.search(r'[\t\n]', newstreamname):
- raise errors.AssertionError(
- "Manifest stream names cannot contain whitespace: '%s'" %
- (newstreamname))
- self._current_stream_name = '.' if newstreamname=='' else newstreamname
+ @synchronized
+ def mkdirs(self, path: str) -> 'Subcollection':
+ """Create and return a subcollection at `path`
- def current_stream_name(self):
- return self._current_stream_name
+ If `path` exists within this collection, raises `FileExistsError`.
+ Otherwise, creates a stream at that path and returns the
+ corresponding `Subcollection`.
+ """
+ if self.find(path) != None:
+ raise IOError(errno.EEXIST, "Directory or file exists", path)
- def finish_current_stream(self):
- self.finish_current_file()
- self.flush_data()
- if not self._current_stream_files:
- pass
- elif self._current_stream_name is None:
- raise errors.AssertionError(
- "Cannot finish an unnamed stream (%d bytes in %d files)" %
- (self._current_stream_length, len(self._current_stream_files)))
- else:
- if not self._current_stream_locators:
- self._current_stream_locators.append(config.EMPTY_BLOCK_LOCATOR)
- self._finished_streams.append([self._current_stream_name,
- self._current_stream_locators,
- self._current_stream_files])
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = None
- self._current_file_pos = 0
- self._current_file_name = None
+ return self.find_or_create(path, COLLECTION)
- def finish(self):
- """Store the manifest in Keep and return its locator.
+ def open(
+ self,
+ path: str,
+ mode: str="r",
+ encoding: Optional[str]=None,
+ ) -> IO:
+ """Open a file-like object within the collection
- This is useful for storing manifest fragments (task outputs)
- temporarily in Keep during a Crunch job.
+ This method returns a file-like object that can read and/or write the
+ file located at `path` within the collection. If you attempt to write
+ a `path` that does not exist, the file is created with `find_or_create`.
+ If the file cannot be opened for any other reason, this method raises
+ `OSError` with an appropriate errno.
- In other cases you should make a collection instead, by
- sending manifest_text() to the API server's "create
- collection" endpoint.
- """
- return self._my_keep().put(self.manifest_text().encode(),
- copies=self.replication)
+ Arguments:
- def portable_data_hash(self):
- stripped = self.stripped_manifest().encode()
- return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
+ * path: str --- The path of the file to open within this collection
- def manifest_text(self):
- self.finish_current_stream()
- manifest = ''
+ * mode: str --- The mode to open this file. Supports all the same
+ values as `builtins.open`.
- for stream in self._finished_streams:
- if not re.search(r'^\.(/.*)?$', stream[0]):
- manifest += './'
- manifest += stream[0].replace(' ', '\\040')
- manifest += ' ' + ' '.join(stream[1])
- manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
- manifest += "\n"
+ * encoding: str | None --- The text encoding of the file. Only used
+ when the file is opened in text mode. The default is
+ platform-dependent.
+ """
+ if not re.search(r'^[rwa][bt]?\+?$', mode):
+ raise errors.ArgumentError("Invalid mode {!r}".format(mode))
- return manifest
+ if mode[0] == 'r' and '+' not in mode:
+ fclass = ArvadosFileReader
+ arvfile = self.find(path)
+ elif not self.writable():
+ raise IOError(errno.EROFS, "Collection is read only")
+ else:
+ fclass = ArvadosFileWriter
+ arvfile = self.find_or_create(path, FILE)
- def data_locators(self):
- ret = []
- for name, locators, files in self._finished_streams:
- ret += locators
- return ret
+ if arvfile is None:
+ raise IOError(errno.ENOENT, "File not found", path)
+ if not isinstance(arvfile, ArvadosFile):
+ raise IOError(errno.EISDIR, "Is a directory", path)
- def save_new(self, name=None):
- return self._api_client.collections().create(
- ensure_unique_name=True,
- body={
- 'name': name,
- 'manifest_text': self.manifest_text(),
- }).execute(num_retries=self.num_retries)
+ if mode[0] == 'w':
+ arvfile.truncate(0)
+ binmode = mode[0] + 'b' + re.sub('[bt]', '', mode[1:])
+ f = fclass(arvfile, mode=binmode, num_retries=self.num_retries)
+ if 'b' not in mode:
+ bufferclass = io.BufferedRandom if f.writable() else io.BufferedReader
+ f = io.TextIOWrapper(bufferclass(WrappableFile(f)), encoding=encoding)
+ return f
-class ResumableCollectionWriter(CollectionWriter):
- """Deprecated, use Collection instead."""
+ def modified(self) -> bool:
+        """Indicate whether this collection has unsaved changes
- STATE_PROPS = ['_current_stream_files', '_current_stream_length',
- '_current_stream_locators', '_current_stream_name',
- '_current_file_name', '_current_file_pos', '_close_file',
- '_data_buffer', '_dependencies', '_finished_streams',
- '_queued_dirents', '_queued_trees']
+ Returns `False` if this collection corresponds to a record loaded from
+ the API server, `True` otherwise.
+ """
+ return not self.committed()
- def __init__(self, api_client=None, **kwargs):
- self._dependencies = {}
- super(ResumableCollectionWriter, self).__init__(api_client, **kwargs)
+ @synchronized
+ def committed(self):
+ """Indicate whether this collection has an API server record
- @classmethod
- def from_state(cls, state, *init_args, **init_kwargs):
- # Try to build a new writer from scratch with the given state.
- # If the state is not suitable to resume (because files have changed,
- # been deleted, aren't predictable, etc.), raise a
- # StaleWriterStateError. Otherwise, return the initialized writer.
- # The caller is responsible for calling writer.do_queued_work()
- # appropriately after it's returned.
- writer = cls(*init_args, **init_kwargs)
- for attr_name in cls.STATE_PROPS:
- attr_value = state[attr_name]
- attr_class = getattr(writer, attr_name).__class__
- # Coerce the value into the same type as the initial value, if
- # needed.
- if attr_class not in (type(None), attr_value.__class__):
- attr_value = attr_class(attr_value)
- setattr(writer, attr_name, attr_value)
- # Check dependencies before we try to resume anything.
- if any(KeepLocator(ls).permission_expired()
- for ls in writer._current_stream_locators):
- raise errors.StaleWriterStateError(
- "locators include expired permission hint")
- writer.check_dependencies()
- if state['_current_file'] is not None:
- path, pos = state['_current_file']
- try:
- writer._queued_file = open(path, 'rb')
- writer._queued_file.seek(pos)
- except IOError as error:
- raise errors.StaleWriterStateError(
- u"failed to reopen active file {}: {}".format(path, error))
- return writer
+ Returns `True` if this collection corresponds to a record loaded from
+ the API server, `False` otherwise.
+ """
+ return self._committed
- def check_dependencies(self):
- for path, orig_stat in listitems(self._dependencies):
- if not S_ISREG(orig_stat[ST_MODE]):
- raise errors.StaleWriterStateError(u"{} not file".format(path))
- try:
- now_stat = tuple(os.stat(path))
- except OSError as error:
- raise errors.StaleWriterStateError(
- u"failed to stat {}: {}".format(path, error))
- if ((not S_ISREG(now_stat[ST_MODE])) or
- (orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
- (orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
- raise errors.StaleWriterStateError(u"{} changed".format(path))
+ @synchronized
+ def set_committed(self, value: bool=True):
+ """Cache whether this collection has an API server record
- def dump_state(self, copy_func=lambda x: x):
- state = {attr: copy_func(getattr(self, attr))
- for attr in self.STATE_PROPS}
- if self._queued_file is None:
- state['_current_file'] = None
- else:
- state['_current_file'] = (os.path.realpath(self._queued_file.name),
- self._queued_file.tell())
- return state
+ .. ATTENTION:: Internal
+ This method is only meant to be used by other Collection methods.
- def _queue_file(self, source, filename=None):
- try:
- src_path = os.path.realpath(source)
- except Exception:
- raise errors.AssertionError(u"{} not a file path".format(source))
- try:
- path_stat = os.stat(src_path)
- except OSError as stat_error:
- path_stat = None
- super(ResumableCollectionWriter, self)._queue_file(source, filename)
- fd_stat = os.fstat(self._queued_file.fileno())
- if not S_ISREG(fd_stat.st_mode):
- # We won't be able to resume from this cache anyway, so don't
- # worry about further checks.
- self._dependencies[source] = tuple(fd_stat)
- elif path_stat is None:
- raise errors.AssertionError(
- u"could not stat {}: {}".format(source, stat_error))
- elif path_stat.st_ino != fd_stat.st_ino:
- raise errors.AssertionError(
- u"{} changed between open and stat calls".format(source))
+ Set this collection's cached "committed" flag to the given
+ value and propagates it as needed.
+ """
+ if value == self._committed:
+ return
+ if value:
+ for k,v in listitems(self._items):
+ v.set_committed(True)
+ self._committed = True
else:
- self._dependencies[src_path] = tuple(fd_stat)
+ self._committed = False
+ if self.parent is not None:
+ self.parent.set_committed(False)
- def write(self, data):
- if self._queued_file is None:
- raise errors.AssertionError(
- "resumable writer can't accept unsourced data")
- return super(ResumableCollectionWriter, self).write(data)
+ @synchronized
+ def __iter__(self) -> Iterator[str]:
+ """Iterate names of streams and files in this collection
+ This method does not recurse. It only iterates the contents of this
+ collection's corresponding stream.
+ """
+ return iter(viewkeys(self._items))
-ADD = "add"
-DEL = "del"
-MOD = "mod"
-TOK = "tok"
-FILE = "file"
-COLLECTION = "collection"
+ @synchronized
+ def __getitem__(self, k: str) -> CollectionItem:
+        """Get an `arvados.arvfile.ArvadosFile` or `Subcollection` in this collection
-class RichCollectionBase(CollectionBase):
- """Base class for Collections and Subcollections.
+ This method does not recurse. If you want to search a path, use
+ `RichCollectionBase.find` instead.
+ """
+ return self._items[k]
- Implements the majority of functionality relating to accessing items in the
- Collection.
-
- """
-
- def __init__(self, parent=None):
- self.parent = parent
- self._committed = False
- self._has_remote_blocks = False
- self._callback = None
- self._items = {}
-
- def _my_api(self):
- raise NotImplementedError()
-
- def _my_keep(self):
- raise NotImplementedError()
-
- def _my_block_manager(self):
- raise NotImplementedError()
-
- def writable(self):
- raise NotImplementedError()
-
- def root_collection(self):
- raise NotImplementedError()
-
- def notify(self, event, collection, name, item):
- raise NotImplementedError()
-
- def stream_name(self):
- raise NotImplementedError()
+ @synchronized
+ def __contains__(self, k: str) -> bool:
+ """Indicate whether this collection has an item with this name
+        This method does not recurse. If you want to check a path, use
+ `RichCollectionBase.exists` instead.
+ """
+ return k in self._items
@synchronized
- def has_remote_blocks(self):
- """Recursively check for a +R segment locator signature."""
-
- if self._has_remote_blocks:
- return True
- for item in self:
- if self[item].has_remote_blocks():
- return True
- return False
+ def __len__(self):
+ """Get the number of items directly contained in this collection
- @synchronized
- def set_has_remote_blocks(self, val):
- self._has_remote_blocks = val
- if self.parent:
- self.parent.set_has_remote_blocks(val)
+ This method does not recurse. It only counts the streams and files
+ in this collection's corresponding stream.
+ """
+ return len(self._items)
@must_be_writable
@synchronized
- def find_or_create(self, path, create_type):
- """Recursively search the specified file path.
-
- May return either a `Collection` or `ArvadosFile`. If not found, will
- create a new item at the specified path based on `create_type`. Will
- create intermediate subcollections needed to contain the final item in
- the path.
-
- :create_type:
- One of `arvados.collection.FILE` or
- `arvados.collection.COLLECTION`. If the path is not found, and value
- of create_type is FILE then create and return a new ArvadosFile for
- the last path component. If COLLECTION, then create and return a new
- Collection for the last path component.
+ def __delitem__(self, p: str) -> None:
+ """Delete an item from this collection's stream
+ This method does not recurse. If you want to remove an item by a
+ path, use `RichCollectionBase.remove` instead.
"""
-
- pathcomponents = path.split("/", 1)
- if pathcomponents[0]:
- item = self._items.get(pathcomponents[0])
- if len(pathcomponents) == 1:
- if item is None:
- # create new file
- if create_type == COLLECTION:
- item = Subcollection(self, pathcomponents[0])
- else:
- item = ArvadosFile(self, pathcomponents[0])
- self._items[pathcomponents[0]] = item
- self.set_committed(False)
- self.notify(ADD, self, pathcomponents[0], item)
- return item
- else:
- if item is None:
- # create new collection
- item = Subcollection(self, pathcomponents[0])
- self._items[pathcomponents[0]] = item
- self.set_committed(False)
- self.notify(ADD, self, pathcomponents[0], item)
- if isinstance(item, RichCollectionBase):
- return item.find_or_create(pathcomponents[1], create_type)
- else:
- raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
- else:
- return self
+ del self._items[p]
+ self.set_committed(False)
+ self.notify(DEL, self, p, None)
@synchronized
- def find(self, path):
- """Recursively search the specified file path.
-
- May return either a Collection or ArvadosFile. Return None if not
- found.
- If path is invalid (ex: starts with '/'), an IOError exception will be
- raised.
+ def keys(self) -> Iterator[str]:
+ """Iterate names of streams and files in this collection
+ This method does not recurse. It only iterates the contents of this
+ collection's corresponding stream.
"""
- if not path:
- raise errors.ArgumentError("Parameter 'path' is empty.")
-
- pathcomponents = path.split("/", 1)
- if pathcomponents[0] == '':
- raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
-
- item = self._items.get(pathcomponents[0])
- if item is None:
- return None
- elif len(pathcomponents) == 1:
- return item
- else:
- if isinstance(item, RichCollectionBase):
- if pathcomponents[1]:
- return item.find(pathcomponents[1])
- else:
- return item
- else:
- raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
+ return self._items.keys()
@synchronized
- def mkdirs(self, path):
- """Recursive subcollection create.
-
- Like `os.makedirs()`. Will create intermediate subcollections needed
- to contain the leaf subcollection path.
-
- """
-
- if self.find(path) != None:
- raise IOError(errno.EEXIST, "Directory or file exists", path)
-
- return self.find_or_create(path, COLLECTION)
-
- def open(self, path, mode="r", encoding=None):
- """Open a file-like object for access.
-
- :path:
- path to a file in the collection
- :mode:
- a string consisting of "r", "w", or "a", optionally followed
- by "b" or "t", optionally followed by "+".
- :"b":
- binary mode: write() accepts bytes, read() returns bytes.
- :"t":
- text mode (default): write() accepts strings, read() returns strings.
- :"r":
- opens for reading
- :"r+":
- opens for reading and writing. Reads/writes share a file pointer.
- :"w", "w+":
- truncates to 0 and opens for reading and writing. Reads/writes share a file pointer.
- :"a", "a+":
- opens for reading and writing. All writes are appended to
- the end of the file. Writing does not affect the file pointer for
- reading.
+ def values(self) -> List[CollectionItem]:
+ """Get a list of objects in this collection's stream
+ The return value includes a `Subcollection` for every stream, and an
+ `arvados.arvfile.ArvadosFile` for every file, directly within this
+ collection's stream. This method does not recurse.
"""
-
- if not re.search(r'^[rwa][bt]?\+?$', mode):
- raise errors.ArgumentError("Invalid mode {!r}".format(mode))
-
- if mode[0] == 'r' and '+' not in mode:
- fclass = ArvadosFileReader
- arvfile = self.find(path)
- elif not self.writable():
- raise IOError(errno.EROFS, "Collection is read only")
- else:
- fclass = ArvadosFileWriter
- arvfile = self.find_or_create(path, FILE)
-
- if arvfile is None:
- raise IOError(errno.ENOENT, "File not found", path)
- if not isinstance(arvfile, ArvadosFile):
- raise IOError(errno.EISDIR, "Is a directory", path)
-
- if mode[0] == 'w':
- arvfile.truncate(0)
-
- binmode = mode[0] + 'b' + re.sub('[bt]', '', mode[1:])
- f = fclass(arvfile, mode=binmode, num_retries=self.num_retries)
- if 'b' not in mode:
- bufferclass = io.BufferedRandom if f.writable() else io.BufferedReader
- f = TextIOWrapper(bufferclass(WrappableFile(f)), encoding=encoding)
- return f
-
- def modified(self):
- """Determine if the collection has been modified since last commited."""
- return not self.committed()
-
- @synchronized
- def committed(self):
- """Determine if the collection has been committed to the API server."""
- return self._committed
+ return listvalues(self._items)
@synchronized
- def set_committed(self, value=True):
- """Recursively set committed flag.
-
- If value is True, set committed to be True for this and all children.
+ def items(self) -> List[Tuple[str, CollectionItem]]:
+ """Get a list of `(name, object)` tuples from this collection's stream
- If value is False, set committed to be False for this and all parents.
+ The return value includes a `Subcollection` for every stream, and an
+ `arvados.arvfile.ArvadosFile` for every file, directly within this
+ collection's stream. This method does not recurse.
"""
- if value == self._committed:
- return
- if value:
- for k,v in listitems(self._items):
- v.set_committed(True)
- self._committed = True
- else:
- self._committed = False
- if self.parent is not None:
- self.parent.set_committed(False)
+ return listitems(self._items)
- @synchronized
- def __iter__(self):
- """Iterate over names of files and collections contained in this collection."""
- return iter(viewkeys(self._items))
+ def exists(self, path: str) -> bool:
+ """Indicate whether this collection includes an item at `path`
- @synchronized
- def __getitem__(self, k):
- """Get a file or collection that is directly contained by this collection.
+ This method returns `True` if `path` refers to a stream or file within
+ this collection, else `False`.
- If you want to search a path, use `find()` instead.
+ Arguments:
+ * path: str --- The path to check for existence within this collection
"""
- return self._items[k]
-
- @synchronized
- def __contains__(self, k):
- """Test if there is a file or collection a directly contained by this collection."""
- return k in self._items
-
- @synchronized
- def __len__(self):
- """Get the number of items directly contained in this collection."""
- return len(self._items)
+ return self.find(path) is not None
@must_be_writable
@synchronized
- def __delitem__(self, p):
- """Delete an item by name which is directly contained by this collection."""
- del self._items[p]
- self.set_committed(False)
- self.notify(DEL, self, p, None)
-
- @synchronized
- def keys(self):
- """Get a list of names of files and collections directly contained in this collection."""
- return self._items.keys()
-
- @synchronized
- def values(self):
- """Get a list of files and collection objects directly contained in this collection."""
- return listvalues(self._items)
-
- @synchronized
- def items(self):
- """Get a list of (name, object) tuples directly contained in this collection."""
- return listitems(self._items)
+ def remove(self, path: str, recursive: bool=False) -> None:
+ """Remove the file or stream at `path`
- def exists(self, path):
- """Test if there is a file or collection at `path`."""
- return self.find(path) is not None
+ Arguments:
- @must_be_writable
- @synchronized
- def remove(self, path, recursive=False):
- """Remove the file or subcollection (directory) at `path`.
+ * path: str --- The path of the item to remove from the collection
- :recursive:
- Specify whether to remove non-empty subcollections (True), or raise an error (False).
+ * recursive: bool --- Controls the method's behavior if `path` refers
+ to a nonempty stream. If `False` (the default), this method raises
+ `OSError` with errno `ENOTEMPTY`. If `True`, this method removes all
+ items under the stream.
"""
-
if not path:
raise errors.ArgumentError("Parameter 'path' is empty.")
@@ -838,26 +556,33 @@ class RichCollectionBase(CollectionBase):
@must_be_writable
@synchronized
- def add(self, source_obj, target_name, overwrite=False, reparent=False):
- """Copy or move a file or subcollection to this collection.
+ def add(
+ self,
+ source_obj: CollectionItem,
+ target_name: str,
+ overwrite: bool=False,
+ reparent: bool=False,
+ ) -> None:
+ """Copy or move a file or subcollection object to this collection
- :source_obj:
- An ArvadosFile, or Subcollection object
+ Arguments:
- :target_name:
- Destination item name. If the target name already exists and is a
- file, this will raise an error unless you specify `overwrite=True`.
+ * source_obj: arvados.arvfile.ArvadosFile | Subcollection --- The file or subcollection
+ to add to this collection
- :overwrite:
- Whether to overwrite target file if it already exists.
+ * target_name: str --- The path inside this collection where
+ `source_obj` should be added.
- :reparent:
- If True, source_obj will be moved from its parent collection to this collection.
- If False, source_obj will be copied and the parent collection will be
- unmodified.
+ * overwrite: bool --- Controls the behavior of this method when the
+ collection already contains an object at `target_name`. If `False`
+ (the default), this method will raise `FileExistsError`. If `True`,
+ the object at `target_name` will be replaced with `source_obj`.
+ * reparent: bool --- Controls whether this method copies or moves
+ `source_obj`. If `False` (the default), `source_obj` is copied into
+ this collection. If `True`, `source_obj` is moved into this
+ collection.
"""
-
if target_name in self and not overwrite:
raise IOError(errno.EEXIST, "File already exists", target_name)
@@ -924,92 +649,117 @@ class RichCollectionBase(CollectionBase):
@must_be_writable
@synchronized
- def copy(self, source, target_path, source_collection=None, overwrite=False):
- """Copy a file or subcollection to a new path in this collection.
+ def copy(
+ self,
+ source: Union[str, CollectionItem],
+ target_path: str,
+ source_collection: Optional['RichCollectionBase']=None,
+ overwrite: bool=False,
+ ) -> None:
+ """Copy a file or subcollection object to this collection
+
+ Arguments:
- :source:
- A string with a path to source file or subcollection, or an actual ArvadosFile or Subcollection object.
+ * source: str | arvados.arvfile.ArvadosFile |
+ arvados.collection.Subcollection --- The file or subcollection to
+ add to this collection. If `source` is a str, the object will be
+ found by looking up this path from `source_collection` (see
+ below).
- :target_path:
- Destination file or path. If the target path already exists and is a
- subcollection, the item will be placed inside the subcollection. If
- the target path already exists and is a file, this will raise an error
- unless you specify `overwrite=True`.
+ * target_path: str --- The path inside this collection where the
+ source object should be added.
- :source_collection:
- Collection to copy `source_path` from (default `self`)
+ * source_collection: arvados.collection.Collection | None --- The
+ collection to find the source object from when `source` is a
+ path. Defaults to the current collection (`self`).
- :overwrite:
- Whether to overwrite target file if it already exists.
+ * overwrite: bool --- Controls the behavior of this method when the
+ collection already contains an object at `target_path`. If `False`
+ (the default), this method will raise `FileExistsError`. If `True`,
+ the object at `target_path` will be replaced with `source_obj`.
"""
-
source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, True)
target_dir.add(source_obj, target_name, overwrite, False)
@must_be_writable
@synchronized
- def rename(self, source, target_path, source_collection=None, overwrite=False):
- """Move a file or subcollection from `source_collection` to a new path in this collection.
+ def rename(
+ self,
+ source: Union[str, CollectionItem],
+ target_path: str,
+ source_collection: Optional['RichCollectionBase']=None,
+ overwrite: bool=False,
+ ) -> None:
+ """Move a file or subcollection object to this collection
- :source:
- A string with a path to source file or subcollection.
+ Arguments:
- :target_path:
- Destination file or path. If the target path already exists and is a
- subcollection, the item will be placed inside the subcollection. If
- the target path already exists and is a file, this will raise an error
- unless you specify `overwrite=True`.
+ * source: str | arvados.arvfile.ArvadosFile |
+ arvados.collection.Subcollection --- The file or subcollection to
+ add to this collection. If `source` is a str, the object will be
+ found by looking up this path from `source_collection` (see
+ below).
- :source_collection:
- Collection to copy `source_path` from (default `self`)
+ * target_path: str --- The path inside this collection where the
+ source object should be added.
- :overwrite:
- Whether to overwrite target file if it already exists.
- """
+ * source_collection: arvados.collection.Collection | None --- The
+ collection to find the source object from when `source` is a
+ path. Defaults to the current collection (`self`).
+ * overwrite: bool --- Controls the behavior of this method when the
+ collection already contains an object at `target_path`. If `False`
+ (the default), this method will raise `FileExistsError`. If `True`,
+ the object at `target_path` will be replaced with `source_obj`.
+ """
source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, False)
if not source_obj.writable():
raise IOError(errno.EROFS, "Source collection is read only", source)
target_dir.add(source_obj, target_name, overwrite, True)
- def portable_manifest_text(self, stream_name="."):
- """Get the manifest text for this collection, sub collections and files.
+ def portable_manifest_text(self, stream_name: str=".") -> str:
+ """Get the portable manifest text for this collection
- This method does not flush outstanding blocks to Keep. It will return
- a normalized manifest with access tokens stripped.
+ The portable manifest text is normalized, and does not include access
+ tokens. This method does not flush outstanding blocks to Keep.
- :stream_name:
- Name to use for this stream (directory)
+ Arguments:
+ * stream_name: str --- The name to use for this collection's stream in
+ the generated manifest. Default `'.'`.
"""
return self._get_manifest_text(stream_name, True, True)
@synchronized
- def manifest_text(self, stream_name=".", strip=False, normalize=False,
- only_committed=False):
- """Get the manifest text for this collection, sub collections and files.
+ def manifest_text(
+ self,
+ stream_name: str=".",
+ strip: bool=False,
+ normalize: bool=False,
+ only_committed: bool=False,
+ ) -> str:
+ """Get the manifest text for this collection
- This method will flush outstanding blocks to Keep. By default, it will
- not normalize an unmodified manifest or strip access tokens.
-
- :stream_name:
- Name to use for this stream (directory)
+ Arguments:
- :strip:
- If True, remove signing tokens from block locators if present.
- If False (default), block locators are left unchanged.
+ * stream_name: str --- The name to use for this collection's stream in
+ the generated manifest. Default `'.'`.
- :normalize:
- If True, always export the manifest text in normalized form
- even if the Collection is not modified. If False (default) and the collection
- is not modified, return the original manifest text even if it is not
- in normalized form.
+ * strip: bool --- Controls whether or not the returned manifest text
+ includes access tokens. If `False` (the default), the manifest text
+ will include access tokens. If `True`, the manifest text will not
+ include access tokens.
- :only_committed:
- If True, don't commit pending blocks.
+ * normalize: bool --- Controls whether or not the returned manifest
+ text is normalized. Default `False`.
+ * only_committed: bool --- Controls whether or not this method uploads
+ pending data to Keep before building and returning the manifest text.
+ If `False` (the default), this method will finish uploading all data
+ to Keep, then return the final manifest. If `True`, this method will
+ build and return a manifest that only refers to the data that has
+ finished uploading at the time this method was called.
"""
-
if not only_committed:
self._my_block_manager().commit_all()
return self._get_manifest_text(stream_name, strip, normalize,
@@ -1088,11 +838,27 @@ class RichCollectionBase(CollectionBase):
return remote_blocks
@synchronized
- def diff(self, end_collection, prefix=".", holding_collection=None):
- """Generate list of add/modify/delete actions.
+ def diff(
+ self,
+ end_collection: 'RichCollectionBase',
+ prefix: str=".",
+ holding_collection: Optional['Collection']=None,
+ ) -> ChangeList:
+ """Build a list of differences between this collection and another
- When given to `apply`, will change `self` to match `end_collection`
+ Arguments:
+
+ * end_collection: arvados.collection.RichCollectionBase --- A
+ collection object with the desired end state. The returned diff
+ list will describe how to go from the current collection object
+ `self` to `end_collection`.
+ * prefix: str --- The name to use for this collection's stream in
+ the diff list. Default `'.'`.
+
+ * holding_collection: arvados.collection.Collection | None --- A
+ collection object used to hold objects for the returned diff
+ list. By default, a new empty collection is created.
"""
changes = []
if holding_collection is None:
@@ -1114,12 +880,20 @@ class RichCollectionBase(CollectionBase):
@must_be_writable
@synchronized
- def apply(self, changes):
- """Apply changes from `diff`.
+ def apply(self, changes: ChangeList) -> None:
+        """Apply a list of changes to this collection
+
+ This method takes a list of changes generated by
+ `RichCollectionBase.diff` and applies it to this
+ collection. Afterward, the state of this collection object will
+ match the state of `end_collection` passed to `diff`. If a change
+ conflicts with a local change, it will be saved to an alternate path
+ indicating the conflict.
- If a change conflicts with a local change, it will be saved to an
- alternate path indicating the conflict.
+ Arguments:
+ * changes: arvados.collection.ChangeList --- The list of differences
+ generated by `RichCollectionBase.diff`.
"""
if changes:
self.set_committed(False)
@@ -1161,8 +935,8 @@ class RichCollectionBase(CollectionBase):
# else, the file is modified or already removed, in either
# case we don't want to try to remove it.
- def portable_data_hash(self):
- """Get the portable data hash for this collection's manifest."""
+ def portable_data_hash(self) -> str:
+ """Get the portable data hash for this collection's manifest"""
if self._manifest_locator and self.committed():
# If the collection is already saved on the API server, and it's committed
# then return API server's PDH response.
@@ -1172,25 +946,64 @@ class RichCollectionBase(CollectionBase):
return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
@synchronized
- def subscribe(self, callback):
+ def subscribe(self, callback: ChangeCallback) -> None:
+ """Set a notify callback for changes to this collection
+
+ Arguments:
+
+ * callback: arvados.collection.ChangeCallback --- The callable to
+ call each time the collection is changed.
+ """
if self._callback is None:
self._callback = callback
else:
raise errors.ArgumentError("A callback is already set on this collection.")
@synchronized
- def unsubscribe(self):
+ def unsubscribe(self) -> None:
+ """Remove any notify callback set for changes to this collection"""
if self._callback is not None:
self._callback = None
@synchronized
- def notify(self, event, collection, name, item):
+ def notify(
+ self,
+ event: ChangeType,
+ collection: 'RichCollectionBase',
+ name: str,
+ item: CollectionItem,
+ ) -> None:
+ """Notify any subscribed callback about a change to this collection
+
+ .. ATTENTION:: Internal
+ This method is only meant to be used by other Collection methods.
+
+ If a callback has been registered with `RichCollectionBase.subscribe`,
+ it will be called with information about a change to this collection.
+ Then this notification will be propagated to this collection's root.
+
+ Arguments:
+
+ * event: Literal[ADD, DEL, MOD, TOK] --- The type of modification to
+ the collection.
+
+ * collection: arvados.collection.RichCollectionBase --- The
+ collection that was modified.
+
+ * name: str --- The name of the file or stream within `collection` that
+ was modified.
+
+ * item: arvados.arvfile.ArvadosFile |
+ arvados.collection.Subcollection --- The new contents at `name`
+ within `collection`.
+ """
if self._callback:
self._callback(event, collection, name, item)
self.root_collection().notify(event, collection, name, item)
@synchronized
- def __eq__(self, other):
+ def __eq__(self, other: Any) -> bool:
+ """Indicate whether this collection object is equal to another"""
if other is self:
return True
if not isinstance(other, RichCollectionBase):
@@ -1204,102 +1017,97 @@ class RichCollectionBase(CollectionBase):
return False
return True
- def __ne__(self, other):
+ def __ne__(self, other: Any) -> bool:
+ """Indicate whether this collection object is not equal to another"""
return not self.__eq__(other)
@synchronized
- def flush(self):
- """Flush bufferblocks to Keep."""
+ def flush(self) -> None:
+ """Upload any pending data to Keep"""
for e in listvalues(self):
e.flush()
class Collection(RichCollectionBase):
- """Represents the root of an Arvados Collection.
-
- This class is threadsafe. The root collection object, all subcollections
- and files are protected by a single lock (i.e. each access locks the entire
- collection).
-
- Brief summary of
- useful methods:
-
- :To read an existing file:
- `c.open("myfile", "r")`
-
- :To write a new file:
- `c.open("myfile", "w")`
-
- :To determine if a file exists:
- `c.find("myfile") is not None`
-
- :To copy a file:
- `c.copy("source", "dest")`
+ """Read and manipulate an Arvados collection
- :To delete a file:
- `c.remove("myfile")`
-
- :To save to an existing collection record:
- `c.save()`
-
- :To save a new collection record:
- `c.save_new()`
-
- :To merge remote changes into this object:
- `c.update()`
-
- Must be associated with an API server Collection record (during
- initialization, or using `save_new`) to use `save` or `update`
+ This class provides a high-level interface to create, read, and update
+ Arvados collections and their contents. Refer to the Arvados Python SDK
+ cookbook for [an introduction to using the Collection class][cookbook].
+ [cookbook]: https://doc.arvados.org/sdk/python/cookbook.html#working-with-collections
"""
- def __init__(self, manifest_locator_or_text=None,
- api_client=None,
- keep_client=None,
- num_retries=None,
- parent=None,
- apiconfig=None,
- block_manager=None,
- replication_desired=None,
- storage_classes_desired=None,
- put_threads=None,
- get_threads=None):
- """Collection constructor.
-
- :manifest_locator_or_text:
- An Arvados collection UUID, portable data hash, raw manifest
- text, or (if creating an empty collection) None.
-
- :parent:
- the parent Collection, may be None.
-
- :apiconfig:
- A dict containing keys for ARVADOS_API_HOST and ARVADOS_API_TOKEN.
- Prefer this over supplying your own api_client and keep_client (except in testing).
- Will use default config settings if not specified.
-
- :api_client:
- The API client object to use for requests. If not specified, create one using `apiconfig`.
-
- :keep_client:
- the Keep client to use for requests. If not specified, create one using `apiconfig`.
-
- :num_retries:
- the number of retries for API and Keep requests.
-
- :block_manager:
- the block manager to use. If not specified, create one.
-
- :replication_desired:
- How many copies should Arvados maintain. If None, API server default
- configuration applies. If not None, this value will also be used
- for determining the number of block copies being written.
-
- :storage_classes_desired:
- A list of storage class names where to upload the data. If None,
- the keep client is expected to store the data into the cluster's
- default storage class(es).
+ def __init__(self, manifest_locator_or_text: Optional[str]=None,
+ api_client: Optional['arvados.api_resources.ArvadosAPIClient']=None,
+ keep_client: Optional['arvados.keep.KeepClient']=None,
+ num_retries: int=10,
+ parent: Optional['Collection']=None,
+ apiconfig: Optional[Mapping[str, str]]=None,
+ block_manager: Optional['arvados.arvfile._BlockManager']=None,
+ replication_desired: Optional[int]=None,
+ storage_classes_desired: Optional[List[str]]=None,
+ put_threads: Optional[int]=None):
+ """Initialize a Collection object
+ Arguments:
+
+ * manifest_locator_or_text: str | None --- This string can contain a
+ collection manifest text, portable data hash, or UUID. When given a
+ portable data hash or UUID, this instance will load a collection
+ record from the API server. Otherwise, this instance will represent a
+ new collection without an API server record. The default value `None`
+ instantiates a new collection with an empty manifest.
+
+ * api_client: arvados.api_resources.ArvadosAPIClient | None --- The
+ Arvados API client object this instance uses to make requests. If
+ none is given, this instance creates its own client using the
+ settings from `apiconfig` (see below). If your client instantiates
+ many Collection objects, you can help limit memory utilization by
+ calling `arvados.api.api` to construct an
+ `arvados.safeapi.ThreadSafeApiCache`, and use that as the `api_client`
+ for every Collection.
+
+ * keep_client: arvados.keep.KeepClient | None --- The Keep client
+ object this instance uses to make requests. If none is given, this
+ instance creates its own client using its `api_client`.
+
+ * num_retries: int --- The number of times that client requests are
+ retried. Default 10.
+
+ * parent: arvados.collection.Collection | None --- The parent Collection
+ object of this instance, if any. This argument is primarily used by
+ other Collection methods; user client code shouldn't need to use it.
+
+ * apiconfig: Mapping[str, str] | None --- A mapping with entries for
+ `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
+ `ARVADOS_API_HOST_INSECURE`. When no `api_client` is provided, the
+ Collection object constructs one from these settings. If no
+ mapping is provided, calls `arvados.config.settings` to get these
+ parameters from user configuration.
+
+ * block_manager: arvados.arvfile._BlockManager | None --- The
+ _BlockManager object used by this instance to coordinate reading
+ and writing Keep data blocks. If none is given, this instance
+ constructs its own. This argument is primarily used by other
+ Collection methods; user client code shouldn't need to use it.
+
+ * replication_desired: int | None --- This controls both the value of
+ the `replication_desired` field on API collection records saved by
+ this class, as well as the number of Keep services that the object
+ writes new data blocks to. If none is given, uses the default value
+ configured for the cluster.
+
+ * storage_classes_desired: list[str] | None --- This controls both
+ the value of the `storage_classes_desired` field on API collection
+ records saved by this class, as well as selecting which specific
+ Keep services the object writes new data blocks to. If none is
+ given, defaults to an empty list.
+
+ * put_threads: int | None --- The number of threads to run
+ simultaneously to upload data blocks to Keep. This value is used when
+ building a new `block_manager`. It is unused when a `block_manager`
+ is provided.
"""
if storage_classes_desired and type(storage_classes_desired) is not list:
@@ -1308,18 +1116,22 @@ class Collection(RichCollectionBase):
super(Collection, self).__init__(parent)
self._api_client = api_client
self._keep_client = keep_client
+
+ # Use the keep client from ThreadSafeApiCache
+ if self._keep_client is None and isinstance(self._api_client, ThreadSafeApiCache):
+ self._keep_client = self._api_client.keep
+
self._block_manager = block_manager
self.replication_desired = replication_desired
self._storage_classes_desired = storage_classes_desired
self.put_threads = put_threads
- self.get_threads = get_threads
if apiconfig:
self._config = apiconfig
else:
self._config = config.settings()
- self.num_retries = num_retries if num_retries is not None else 0
+ self.num_retries = num_retries
self._manifest_locator = None
self._manifest_text = None
self._portable_data_hash = None
@@ -1349,19 +1161,33 @@ class Collection(RichCollectionBase):
except errors.SyntaxError as e:
raise errors.ArgumentError("Error processing manifest text: %s", str(e)) from None
- def storage_classes_desired(self):
+ def storage_classes_desired(self) -> List[str]:
+ """Get this collection's `storage_classes_desired` value"""
return self._storage_classes_desired or []
- def root_collection(self):
+ def root_collection(self) -> 'Collection':
return self
- def get_properties(self):
+ def get_properties(self) -> Properties:
+ """Get this collection's properties
+
+ This method always returns a dict. If this collection object does not
+ have an associated API record, or that record does not have any
+ properties set, this method returns an empty dict.
+ """
if self._api_response and self._api_response["properties"]:
return self._api_response["properties"]
else:
return {}
- def get_trash_at(self):
+ def get_trash_at(self) -> Optional[datetime.datetime]:
+ """Get this collection's `trash_at` field
+
+ This method parses the `trash_at` field of the collection's API
+ record and returns a datetime from it. If that field is not set, or
+ this collection object does not have an associated API record,
+ returns None.
+ """
if self._api_response and self._api_response["trash_at"]:
try:
return ciso8601.parse_datetime(self._api_response["trash_at"])
@@ -1370,21 +1196,57 @@ class Collection(RichCollectionBase):
else:
return None
- def stream_name(self):
+ def stream_name(self) -> str:
return "."
- def writable(self):
+ def writable(self) -> bool:
return True
@synchronized
- def known_past_version(self, modified_at_and_portable_data_hash):
+ def known_past_version(
+ self,
+ modified_at_and_portable_data_hash: Tuple[Optional[str], Optional[str]]
+ ) -> bool:
+ """Indicate whether an API record for this collection has been seen before
+
+ As this collection object loads records from the API server, it records
+ their `modified_at` and `portable_data_hash` fields. This method accepts
+ a 2-tuple with values for those fields, and returns `True` if the
+ combination was previously loaded.
+ """
return modified_at_and_portable_data_hash in self._past_versions
@synchronized
@retry_method
- def update(self, other=None, num_retries=None):
- """Merge the latest collection on the API server with the current collection."""
+ def update(
+ self,
+ other: Optional['Collection']=None,
+ num_retries: Optional[int]=None,
+ ) -> None:
+ """Merge another collection's contents into this one
+
+ This method compares the manifest of this collection instance with
+ another, then updates this instance's manifest with changes from the
+ other, renaming files to flag conflicts where necessary.
+
+ When called without any arguments, this method reloads the collection's
+ API record, and updates this instance with any changes that have
+ appeared server-side. If this instance does not have a corresponding
+ API record, this method raises `arvados.errors.ArgumentError`.
+
+ Arguments:
+ * other: arvados.collection.Collection | None --- The collection
+ whose contents should be merged into this instance. When not
+ provided, this method reloads this collection's API record and
+ constructs a Collection object from it. If this instance does not
+ have a corresponding API record, this method raises
+ `arvados.errors.ArgumentError`.
+
+ * num_retries: int | None --- The number of times to retry reloading
+ the collection's API record from the API server. If not specified,
+ uses the `num_retries` provided when this instance was constructed.
+ """
if other is None:
if self._manifest_locator is None:
raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
@@ -1406,7 +1268,7 @@ class Collection(RichCollectionBase):
@synchronized
def _my_api(self):
if self._api_client is None:
- self._api_client = ThreadSafeApiCache(self._config)
+ self._api_client = ThreadSafeApiCache(self._config, version='v1')
if self._keep_client is None:
self._keep_client = self._api_client.keep
return self._api_client
@@ -1430,8 +1292,7 @@ class Collection(RichCollectionBase):
copies=copies,
put_threads=self.put_threads,
num_retries=self.num_retries,
- storage_classes_func=self.storage_classes_desired,
- get_threads=self.get_threads,)
+ storage_classes_func=self.storage_classes_desired)
return self._block_manager
def _remember_api_response(self, response):
@@ -1478,33 +1339,66 @@ class Collection(RichCollectionBase):
return self
def __exit__(self, exc_type, exc_value, traceback):
- """Support scoped auto-commit in a with: block."""
+ """Exit a context with this collection instance
+
+ If no exception was raised inside the context block, and this
+ collection is writable and has a corresponding API record, that
+ record will be updated to match the state of this instance at the end
+ of the block.
+ """
if exc_type is None:
if self.writable() and self._has_collection_uuid():
self.save()
self.stop_threads()
- def stop_threads(self):
+ def stop_threads(self) -> None:
+ """Stop background Keep upload/download threads"""
if self._block_manager is not None:
self._block_manager.stop_threads()
@synchronized
- def manifest_locator(self):
- """Get the manifest locator, if any.
-
- The manifest locator will be set when the collection is loaded from an
- API server record or the portable data hash of a manifest.
-
- The manifest locator will be None if the collection is newly created or
- was created directly from manifest text. The method `save_new()` will
- assign a manifest locator.
-
+ def manifest_locator(self) -> Optional[str]:
+ """Get this collection's manifest locator, if any
+
+ * If this collection instance is associated with an API record with a
+ UUID, return that.
+ * Otherwise, if this collection instance was loaded from an API record
+ by portable data hash, return that.
+ * Otherwise, return `None`.
"""
return self._manifest_locator
@synchronized
- def clone(self, new_parent=None, new_name=None, readonly=False, new_config=None):
- if new_config is None:
+ def clone(
+ self,
+ new_parent: Optional['Collection']=None,
+ new_name: Optional[str]=None,
+ readonly: bool=False,
+ new_config: Optional[Mapping[str, str]]=None,
+ ) -> 'Collection':
+ """Create a Collection object with the same contents as this instance
+
+ This method creates a new Collection object with contents that match
+ this instance's. The new collection will not be associated with any API
+ record.
+
+ Arguments:
+
+ * new_parent: arvados.collection.Collection | None --- This value is
+ passed to the new Collection's constructor as the `parent`
+ argument.
+
+ * new_name: str | None --- This value is unused.
+
+ * readonly: bool --- If this value is true, this method constructs and
+ returns a `CollectionReader`. Otherwise, it returns a mutable
+ `Collection`. Default `False`.
+
+ * new_config: Mapping[str, str] | None --- This value is passed to the
+ new Collection's constructor as `apiconfig`. If no value is provided,
+ defaults to the configuration passed to this instance's constructor.
+ """
+ if new_config is None:
new_config = self._config
if readonly:
newcollection = CollectionReader(parent=new_parent, apiconfig=new_config)
@@ -1515,31 +1409,31 @@ class Collection(RichCollectionBase):
return newcollection
@synchronized
- def api_response(self):
- """Returns information about this Collection fetched from the API server.
-
- If the Collection exists in Keep but not the API server, currently
- returns None. Future versions may provide a synthetic response.
+ def api_response(self) -> Optional[Dict[str, Any]]:
+ """Get this instance's associated API record
+ If this Collection instance has an associated API record, return it.
+ Otherwise, return `None`.
"""
return self._api_response
- def find_or_create(self, path, create_type):
- """See `RichCollectionBase.find_or_create`"""
+ def find_or_create(
+ self,
+ path: str,
+ create_type: CreateType,
+ ) -> CollectionItem:
if path == ".":
return self
else:
return super(Collection, self).find_or_create(path[2:] if path.startswith("./") else path, create_type)
- def find(self, path):
- """See `RichCollectionBase.find`"""
+ def find(self, path: str) -> CollectionItem:
if path == ".":
return self
else:
return super(Collection, self).find(path[2:] if path.startswith("./") else path)
- def remove(self, path, recursive=False):
- """See `RichCollectionBase.remove`"""
+ def remove(self, path: str, recursive: bool=False) -> None:
if path == ".":
raise errors.ArgumentError("Cannot remove '.'")
else:
@@ -1548,49 +1442,52 @@ class Collection(RichCollectionBase):
@must_be_writable
@synchronized
@retry_method
- def save(self,
- properties=None,
- storage_classes=None,
- trash_at=None,
- merge=True,
- num_retries=None,
- preserve_version=False):
- """Save collection to an existing collection record.
-
- Commit pending buffer blocks to Keep, merge with remote record (if
- merge=True, the default), and update the collection record. Returns
- the current manifest text.
-
- Will raise AssertionError if not associated with a collection record on
- the API server. If you want to save a manifest to Keep only, see
- `save_new()`.
-
- :properties:
- Additional properties of collection. This value will replace any existing
- properties of collection.
-
- :storage_classes:
- Specify desirable storage classes to be used when writing data to Keep.
-
- :trash_at:
- A collection is *expiring* when it has a *trash_at* time in the future.
- An expiring collection can be accessed as normal,
- but is scheduled to be trashed automatically at the *trash_at* time.
-
- :merge:
- Update and merge remote changes before saving. Otherwise, any
- remote changes will be ignored and overwritten.
-
- :num_retries:
- Retry count on API calls (if None, use the collection default)
-
- :preserve_version:
- If True, indicate that the collection content being saved right now
- should be preserved in a version snapshot if the collection record is
- updated in the future. Requires that the API server has
- Collections.CollectionVersioning enabled, if not, setting this will
- raise an exception.
+ def save(
+ self,
+ properties: Optional[Properties]=None,
+ storage_classes: Optional[StorageClasses]=None,
+ trash_at: Optional[datetime.datetime]=None,
+ merge: bool=True,
+ num_retries: Optional[int]=None,
+ preserve_version: bool=False,
+ ) -> str:
+ """Save collection to an existing API record
+
+ This method updates the instance's corresponding API record to match
+ the instance's state. If this instance does not have a corresponding API
+ record yet, raises `AssertionError`. (To create a new API record, use
+ `Collection.save_new`.) This method returns the saved collection
+ manifest.
+
+ Arguments:
+ * properties: dict[str, Any] | None --- If provided, the API record will
+ be updated with these properties. Note this will completely replace
+ any existing properties.
+
+ * storage_classes: list[str] | None --- If provided, the API record will
+ be updated with this value in the `storage_classes_desired` field.
+ This value will also be saved on the instance and used for any
+ changes that follow.
+
+ * trash_at: datetime.datetime | None --- If provided, the API record
+ will be updated with this value in the `trash_at` field.
+
+ * merge: bool --- If `True` (the default), this method will first
+ reload this collection's API record, and merge any new contents into
+ this instance before saving changes. See `Collection.update` for
+ details.
+
+ * num_retries: int | None --- The number of times to retry reloading
+ the collection's API record from the API server. If not specified,
+ uses the `num_retries` provided when this instance was constructed.
+
+ * preserve_version: bool --- This value will be passed directly
+ to the underlying API call. If `True`, the Arvados API will
+ preserve the versions of this collection both immediately before
+ and after the update. If `True` when the API server is not
+ configured with collection versioning, this method raises
+ `arvados.errors.ArgumentError`.
"""
if properties and type(properties) is not dict:
raise errors.ArgumentError("properties must be dictionary type.")
@@ -1654,60 +1551,66 @@ class Collection(RichCollectionBase):
@must_be_writable
@synchronized
@retry_method
- def save_new(self, name=None,
- create_collection_record=True,
- owner_uuid=None,
- properties=None,
- storage_classes=None,
- trash_at=None,
- ensure_unique_name=False,
- num_retries=None,
- preserve_version=False):
- """Save collection to a new collection record.
-
- Commit pending buffer blocks to Keep and, when create_collection_record
- is True (default), create a new collection record. After creating a
- new collection record, this Collection object will be associated with
- the new record used by `save()`. Returns the current manifest text.
-
- :name:
- The collection name.
-
- :create_collection_record:
- If True, create a collection record on the API server.
- If False, only commit blocks to Keep and return the manifest text.
-
- :owner_uuid:
- the user, or project uuid that will own this collection.
- If None, defaults to the current user.
-
- :properties:
- Additional properties of collection. This value will replace any existing
- properties of collection.
-
- :storage_classes:
- Specify desirable storage classes to be used when writing data to Keep.
-
- :trash_at:
- A collection is *expiring* when it has a *trash_at* time in the future.
- An expiring collection can be accessed as normal,
- but is scheduled to be trashed automatically at the *trash_at* time.
-
- :ensure_unique_name:
- If True, ask the API server to rename the collection
- if it conflicts with a collection with the same name and owner. If
- False, a name conflict will result in an error.
-
- :num_retries:
- Retry count on API calls (if None, use the collection default)
-
- :preserve_version:
- If True, indicate that the collection content being saved right now
- should be preserved in a version snapshot if the collection record is
- updated in the future. Requires that the API server has
- Collections.CollectionVersioning enabled, if not, setting this will
- raise an exception.
+ def save_new(
+ self,
+ name: Optional[str]=None,
+ create_collection_record: bool=True,
+ owner_uuid: Optional[str]=None,
+ properties: Optional[Properties]=None,
+ storage_classes: Optional[StorageClasses]=None,
+ trash_at: Optional[datetime.datetime]=None,
+ ensure_unique_name: bool=False,
+ num_retries: Optional[int]=None,
+ preserve_version: bool=False,
+ ):
+ """Save collection to a new API record
+
+ This method finishes uploading new data blocks and (optionally)
+ creates a new API collection record with the provided data. If a new
+ record is created, this instance becomes associated with that record
+ for future updates like `save()`. This method returns the saved
+ collection manifest.
+
+ Arguments:
+
+ * name: str | None --- The `name` field to use on the new collection
+ record. If not specified, a generic default name is generated.
+ * create_collection_record: bool --- If `True` (the default), creates a
+ collection record on the API server. If `False`, the method finishes
+ all data uploads and only returns the resulting collection manifest
+ without sending it to the API server.
+
+ * owner_uuid: str | None --- The `owner_uuid` field to use on the
+ new collection record.
+
+ * properties: dict[str, Any] | None --- The `properties` field to use on
+ the new collection record.
+
+ * storage_classes: list[str] | None --- The
+ `storage_classes_desired` field to use on the new collection record.
+
+ * trash_at: datetime.datetime | None --- The `trash_at` field to use
+ on the new collection record.
+
+ * ensure_unique_name: bool --- This value is passed directly to the
+ Arvados API when creating the collection record. If `True`, the API
+ server may modify the submitted `name` to ensure the collection's
+ `name`+`owner_uuid` combination is unique. If `False` (the default),
+ if a collection already exists with this same `name`+`owner_uuid`
+ combination, creating a collection record will raise a validation
+ error.
+
+ * num_retries: int | None --- The number of times to retry reloading
+ the collection's API record from the API server. If not specified,
+ uses the `num_retries` provided when this instance was constructed.
+
+ * preserve_version: bool --- This value will be passed directly
+ to the underlying API call. If `True`, the Arvados API will
+ preserve the versions of this collection both immediately before
+ and after the update. If `True` when the API server is not
+ configured with collection versioning, this method raises
+ `arvados.errors.ArgumentError`.
"""
if properties and type(properties) is not dict:
raise errors.ArgumentError("properties must be dictionary type.")
@@ -1768,7 +1671,7 @@ class Collection(RichCollectionBase):
_segment_re = re.compile(r'(\d+):(\d+):(\S+)')
def _unescape_manifest_path(self, path):
- return re.sub('\\\\([0-3][0-7][0-7])', lambda m: chr(int(m.group(1), 8)), path)
+ return re.sub(r'\\([0-3][0-7][0-7])', lambda m: chr(int(m.group(1), 8)), path)
@synchronized
def _import_manifest(self, manifest_text):
@@ -1845,17 +1748,24 @@ class Collection(RichCollectionBase):
self.set_committed(True)
@synchronized
- def notify(self, event, collection, name, item):
+ def notify(
+ self,
+ event: ChangeType,
+ collection: 'RichCollectionBase',
+ name: str,
+ item: CollectionItem,
+ ) -> None:
if self._callback:
self._callback(event, collection, name, item)
class Subcollection(RichCollectionBase):
- """This is a subdirectory within a collection that doesn't have its own API
- server record.
-
- Subcollection locking falls under the umbrella lock of its root collection.
+ """Read and manipulate a stream/directory within an Arvados collection
+ This class represents a single stream (like a directory) within an Arvados
+ `Collection`. It is returned by `Collection.find` and provides the same API.
+ Operations that work on the API collection record propagate to the parent
+ `Collection` object.
"""
def __init__(self, parent, name):
@@ -1865,10 +1775,10 @@ class Subcollection(RichCollectionBase):
self.name = name
self.num_retries = parent.num_retries
- def root_collection(self):
+ def root_collection(self) -> 'Collection':
return self.parent.root_collection()
- def writable(self):
+ def writable(self) -> bool:
return self.root_collection().writable()
def _my_api(self):
@@ -1880,11 +1790,15 @@ class Subcollection(RichCollectionBase):
def _my_block_manager(self):
return self.root_collection()._my_block_manager()
- def stream_name(self):
+ def stream_name(self) -> str:
return os.path.join(self.parent.stream_name(), self.name)
@synchronized
- def clone(self, new_parent, new_name):
+ def clone(
+ self,
+ new_parent: Optional['Collection']=None,
+ new_name: Optional[str]=None,
+ ) -> 'Subcollection':
c = Subcollection(new_parent, new_name)
c._clonefrom(self)
return c
@@ -1911,11 +1825,11 @@ class Subcollection(RichCollectionBase):
class CollectionReader(Collection):
- """A read-only collection object.
-
- Initialize from a collection UUID or portable data hash, or raw
- manifest text. See `Collection` constructor for detailed options.
+ """Read-only `Collection` subclass
+ This class will never create or update any API collection records. You can
+ use this class for additional code safety when you only need to read
+ existing collections.
"""
def __init__(self, manifest_locator_or_text, *args, **kwargs):
self._in_init = True
@@ -1929,7 +1843,7 @@ class CollectionReader(Collection):
# all_streams() and all_files()
self._streams = None
- def writable(self):
+ def writable(self) -> bool:
return self._in_init
def _populate_streams(orig_func):
@@ -1946,16 +1860,10 @@ class CollectionReader(Collection):
return orig_func(self, *args, **kwargs)
return populate_streams_wrapper
+ @arvados.util._deprecated('3.0', 'Collection iteration')
@_populate_streams
def normalize(self):
- """Normalize the streams returned by `all_streams`.
-
- This method is kept for backwards compatability and only affects the
- behavior of `all_streams()` and `all_files()`
-
- """
-
- # Rearrange streams
+ """Normalize the streams returned by `all_streams`"""
streams = {}
for s in self.all_streams():
for f in s.all_files():
@@ -1969,13 +1877,436 @@ class CollectionReader(Collection):
self._streams = [normalize_stream(s, streams[s])
for s in sorted(streams)]
+
+ @arvados.util._deprecated('3.0', 'Collection iteration')
@_populate_streams
def all_streams(self):
return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
for s in self._streams]
+ @arvados.util._deprecated('3.0', 'Collection iteration')
@_populate_streams
def all_files(self):
for s in self.all_streams():
for f in s.all_files():
yield f
+
+
+class CollectionWriter(CollectionBase):
+ """Create a new collection from scratch
+
+ .. WARNING:: Deprecated
+ This class is deprecated. Prefer `arvados.collection.Collection`
+ instead.
+ """
+
+ @arvados.util._deprecated('3.0', 'arvados.collection.Collection')
+ def __init__(self, api_client=None, num_retries=0, replication=None):
+ """Instantiate a CollectionWriter.
+
+ CollectionWriter lets you build a new Arvados Collection from scratch.
+ Write files to it. The CollectionWriter will upload data to Keep as
+ appropriate, and provide you with the Collection manifest text when
+ you're finished.
+
+ Arguments:
+ * api_client: The API client to use to look up Collections. If not
+ provided, CollectionReader will build one from available Arvados
+ configuration.
+ * num_retries: The default number of times to retry failed
+ service requests. Default 0. You may change this value
+ after instantiation, but note those changes may not
+ propagate to related objects like the Keep client.
+ * replication: The number of copies of each block to store.
+ If this argument is None or not supplied, replication is
+ the server-provided default if available, otherwise 2.
+ """
+ self._api_client = api_client
+ self.num_retries = num_retries
+ self.replication = (2 if replication is None else replication)
+ self._keep_client = None
+ self._data_buffer = []
+ self._data_buffer_len = 0
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = '.'
+ self._current_file_name = None
+ self._current_file_pos = 0
+ self._finished_streams = []
+ self._close_file = None
+ self._queued_file = None
+ self._queued_dirents = deque()
+ self._queued_trees = deque()
+ self._last_open = None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type is None:
+ self.finish()
+
+ def do_queued_work(self):
+ # The work queue consists of three pieces:
+ # * _queued_file: The file object we're currently writing to the
+ # Collection.
+ # * _queued_dirents: Entries under the current directory
+ # (_queued_trees[0]) that we want to write or recurse through.
+ # This may contain files from subdirectories if
+ # max_manifest_depth == 0 for this directory.
+ # * _queued_trees: Directories that should be written as separate
+ # streams to the Collection.
+ # This function handles the smallest piece of work currently queued
+ # (current file, then current directory, then next directory) until
+ # no work remains. The _work_THING methods each do a unit of work on
+ # THING. _queue_THING methods add a THING to the work queue.
+ while True:
+ if self._queued_file:
+ self._work_file()
+ elif self._queued_dirents:
+ self._work_dirents()
+ elif self._queued_trees:
+ self._work_trees()
+ else:
+ break
+
+ def _work_file(self):
+ while True:
+ buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
+ if not buf:
+ break
+ self.write(buf)
+ self.finish_current_file()
+ if self._close_file:
+ self._queued_file.close()
+ self._close_file = None
+ self._queued_file = None
+
+ def _work_dirents(self):
+ path, stream_name, max_manifest_depth = self._queued_trees[0]
+ if stream_name != self.current_stream_name():
+ self.start_new_stream(stream_name)
+ while self._queued_dirents:
+ dirent = self._queued_dirents.popleft()
+ target = os.path.join(path, dirent)
+ if os.path.isdir(target):
+ self._queue_tree(target,
+ os.path.join(stream_name, dirent),
+ max_manifest_depth - 1)
+ else:
+ self._queue_file(target, dirent)
+ break
+ if not self._queued_dirents:
+ self._queued_trees.popleft()
+
+ def _work_trees(self):
+ path, stream_name, max_manifest_depth = self._queued_trees[0]
+ d = arvados.util.listdir_recursive(
+ path, max_depth = (None if max_manifest_depth == 0 else 0))
+ if d:
+ self._queue_dirents(stream_name, d)
+ else:
+ self._queued_trees.popleft()
+
+ def _queue_file(self, source, filename=None):
+ assert (self._queued_file is None), "tried to queue more than one file"
+ if not hasattr(source, 'read'):
+ source = open(source, 'rb')
+ self._close_file = True
+ else:
+ self._close_file = False
+ if filename is None:
+ filename = os.path.basename(source.name)
+ self.start_new_file(filename)
+ self._queued_file = source
+
+ def _queue_dirents(self, stream_name, dirents):
+ assert (not self._queued_dirents), "tried to queue more than one tree"
+ self._queued_dirents = deque(sorted(dirents))
+
+ def _queue_tree(self, path, stream_name, max_manifest_depth):
+ self._queued_trees.append((path, stream_name, max_manifest_depth))
+
+ def write_file(self, source, filename=None):
+ self._queue_file(source, filename)
+ self.do_queued_work()
+
+ def write_directory_tree(self,
+ path, stream_name='.', max_manifest_depth=-1):
+ self._queue_tree(path, stream_name, max_manifest_depth)
+ self.do_queued_work()
+
+ def write(self, newdata):
+ if isinstance(newdata, bytes):
+ pass
+ elif isinstance(newdata, str):
+ newdata = newdata.encode()
+ elif hasattr(newdata, '__iter__'):
+ for s in newdata:
+ self.write(s)
+ return
+ self._data_buffer.append(newdata)
+ self._data_buffer_len += len(newdata)
+ self._current_stream_length += len(newdata)
+ while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
+ self.flush_data()
+
+ def open(self, streampath, filename=None):
+ """open(streampath[, filename]) -> file-like object
+
+ Pass in the path of a file to write to the Collection, either as a
+ single string or as two separate stream name and file name arguments.
+ This method returns a file-like object you can write to add it to the
+ Collection.
+
+ You may only have one file object from the Collection open at a time,
+ so be sure to close the object when you're done. Using the object in
+ a with statement makes that easy:
+
+ with cwriter.open('./doc/page1.txt') as outfile:
+ outfile.write(page1_data)
+ with cwriter.open('./doc/page2.txt') as outfile:
+ outfile.write(page2_data)
+ """
+ if filename is None:
+ streampath, filename = split(streampath)
+ if self._last_open and not self._last_open.closed:
+ raise errors.AssertionError(
+ u"can't open '{}' when '{}' is still open".format(
+ filename, self._last_open.name))
+ if streampath != self.current_stream_name():
+ self.start_new_stream(streampath)
+ self.set_current_file_name(filename)
+ self._last_open = _WriterFile(self, filename)
+ return self._last_open
+
+ def flush_data(self):
+ data_buffer = b''.join(self._data_buffer)
+ if data_buffer:
+ self._current_stream_locators.append(
+ self._my_keep().put(
+ data_buffer[0:config.KEEP_BLOCK_SIZE],
+ copies=self.replication))
+ self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
+ self._data_buffer_len = len(self._data_buffer[0])
+
+ def start_new_file(self, newfilename=None):
+ self.finish_current_file()
+ self.set_current_file_name(newfilename)
+
+ def set_current_file_name(self, newfilename):
+ if re.search(r'[\t\n]', newfilename):
+ raise errors.AssertionError(
+ "Manifest filenames cannot contain whitespace: %s" %
+ newfilename)
+ elif re.search(r'\x00', newfilename):
+ raise errors.AssertionError(
+ "Manifest filenames cannot contain NUL characters: %s" %
+ newfilename)
+ self._current_file_name = newfilename
+
+ def current_file_name(self):
+ return self._current_file_name
+
+ def finish_current_file(self):
+ if self._current_file_name is None:
+ if self._current_file_pos == self._current_stream_length:
+ return
+ raise errors.AssertionError(
+ "Cannot finish an unnamed file " +
+ "(%d bytes at offset %d in '%s' stream)" %
+ (self._current_stream_length - self._current_file_pos,
+ self._current_file_pos,
+ self._current_stream_name))
+ self._current_stream_files.append([
+ self._current_file_pos,
+ self._current_stream_length - self._current_file_pos,
+ self._current_file_name])
+ self._current_file_pos = self._current_stream_length
+ self._current_file_name = None
+
+ def start_new_stream(self, newstreamname='.'):
+ self.finish_current_stream()
+ self.set_current_stream_name(newstreamname)
+
+ def set_current_stream_name(self, newstreamname):
+ if re.search(r'[\t\n]', newstreamname):
+ raise errors.AssertionError(
+ "Manifest stream names cannot contain whitespace: '%s'" %
+ (newstreamname))
+ self._current_stream_name = '.' if newstreamname=='' else newstreamname
+
+ def current_stream_name(self):
+ return self._current_stream_name
+
+ def finish_current_stream(self):
+ self.finish_current_file()
+ self.flush_data()
+ if not self._current_stream_files:
+ pass
+ elif self._current_stream_name is None:
+ raise errors.AssertionError(
+ "Cannot finish an unnamed stream (%d bytes in %d files)" %
+ (self._current_stream_length, len(self._current_stream_files)))
+ else:
+ if not self._current_stream_locators:
+ self._current_stream_locators.append(config.EMPTY_BLOCK_LOCATOR)
+ self._finished_streams.append([self._current_stream_name,
+ self._current_stream_locators,
+ self._current_stream_files])
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = None
+ self._current_file_pos = 0
+ self._current_file_name = None
+
+ def finish(self):
+ """Store the manifest in Keep and return its locator.
+
+ This is useful for storing manifest fragments (task outputs)
+ temporarily in Keep during a Crunch job.
+
+ In other cases you should make a collection instead, by
+ sending manifest_text() to the API server's "create
+ collection" endpoint.
+ """
+ return self._my_keep().put(self.manifest_text().encode(),
+ copies=self.replication)
+
+ def portable_data_hash(self):
+ stripped = self.stripped_manifest().encode()
+ return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
+
+ def manifest_text(self):
+ self.finish_current_stream()
+ manifest = ''
+
+ for stream in self._finished_streams:
+ if not re.search(r'^\.(/.*)?$', stream[0]):
+ manifest += './'
+ manifest += stream[0].replace(' ', '\\040')
+ manifest += ' ' + ' '.join(stream[1])
+ manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
+ manifest += "\n"
+
+ return manifest
+
+ def data_locators(self):
+ ret = []
+ for name, locators, files in self._finished_streams:
+ ret += locators
+ return ret
+
+ def save_new(self, name=None):
+ return self._api_client.collections().create(
+ ensure_unique_name=True,
+ body={
+ 'name': name,
+ 'manifest_text': self.manifest_text(),
+ }).execute(num_retries=self.num_retries)
+
+
+class ResumableCollectionWriter(CollectionWriter):
+ """CollectionWriter that can serialize internal state to disk
+
+ .. WARNING:: Deprecated
+ This class is deprecated. Prefer `arvados.collection.Collection`
+ instead.
+ """
+
+ STATE_PROPS = ['_current_stream_files', '_current_stream_length',
+ '_current_stream_locators', '_current_stream_name',
+ '_current_file_name', '_current_file_pos', '_close_file',
+ '_data_buffer', '_dependencies', '_finished_streams',
+ '_queued_dirents', '_queued_trees']
+
+ @arvados.util._deprecated('3.0', 'arvados.collection.Collection')
+ def __init__(self, api_client=None, **kwargs):
+ self._dependencies = {}
+ super(ResumableCollectionWriter, self).__init__(api_client, **kwargs)
+
+ @classmethod
+ def from_state(cls, state, *init_args, **init_kwargs):
+ # Try to build a new writer from scratch with the given state.
+ # If the state is not suitable to resume (because files have changed,
+ # been deleted, aren't predictable, etc.), raise a
+ # StaleWriterStateError. Otherwise, return the initialized writer.
+ # The caller is responsible for calling writer.do_queued_work()
+ # appropriately after it's returned.
+ writer = cls(*init_args, **init_kwargs)
+ for attr_name in cls.STATE_PROPS:
+ attr_value = state[attr_name]
+ attr_class = getattr(writer, attr_name).__class__
+ # Coerce the value into the same type as the initial value, if
+ # needed.
+ if attr_class not in (type(None), attr_value.__class__):
+ attr_value = attr_class(attr_value)
+ setattr(writer, attr_name, attr_value)
+ # Check dependencies before we try to resume anything.
+ if any(KeepLocator(ls).permission_expired()
+ for ls in writer._current_stream_locators):
+ raise errors.StaleWriterStateError(
+ "locators include expired permission hint")
+ writer.check_dependencies()
+ if state['_current_file'] is not None:
+ path, pos = state['_current_file']
+ try:
+ writer._queued_file = open(path, 'rb')
+ writer._queued_file.seek(pos)
+ except IOError as error:
+ raise errors.StaleWriterStateError(
+ u"failed to reopen active file {}: {}".format(path, error))
+ return writer
+
+ def check_dependencies(self):
+ for path, orig_stat in listitems(self._dependencies):
+ if not S_ISREG(orig_stat[ST_MODE]):
+ raise errors.StaleWriterStateError(u"{} not file".format(path))
+ try:
+ now_stat = tuple(os.stat(path))
+ except OSError as error:
+ raise errors.StaleWriterStateError(
+ u"failed to stat {}: {}".format(path, error))
+ if ((not S_ISREG(now_stat[ST_MODE])) or
+ (orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
+ (orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
+ raise errors.StaleWriterStateError(u"{} changed".format(path))
+
+ def dump_state(self, copy_func=lambda x: x):
+ state = {attr: copy_func(getattr(self, attr))
+ for attr in self.STATE_PROPS}
+ if self._queued_file is None:
+ state['_current_file'] = None
+ else:
+ state['_current_file'] = (os.path.realpath(self._queued_file.name),
+ self._queued_file.tell())
+ return state
+
+ def _queue_file(self, source, filename=None):
+ try:
+ src_path = os.path.realpath(source)
+ except Exception:
+ raise errors.AssertionError(u"{} not a file path".format(source))
+ try:
+ path_stat = os.stat(src_path)
+        except OSError as os_error:
+            path_stat, stat_error = None, os_error
+ super(ResumableCollectionWriter, self)._queue_file(source, filename)
+ fd_stat = os.fstat(self._queued_file.fileno())
+ if not S_ISREG(fd_stat.st_mode):
+ # We won't be able to resume from this cache anyway, so don't
+ # worry about further checks.
+ self._dependencies[source] = tuple(fd_stat)
+ elif path_stat is None:
+ raise errors.AssertionError(
+ u"could not stat {}: {}".format(source, stat_error))
+ elif path_stat.st_ino != fd_stat.st_ino:
+ raise errors.AssertionError(
+ u"{} changed between open and stat calls".format(source))
+ else:
+ self._dependencies[src_path] = tuple(fd_stat)
+
+ def write(self, data):
+ if self._queued_file is None:
+ raise errors.AssertionError(
+ "resumable writer can't accept unsourced data")
+ return super(ResumableCollectionWriter, self).write(data)
diff --git a/sdk/python/arvados/commands/_util.py b/sdk/python/arvados/commands/_util.py
index d10d38eb5b..6c792b2e0d 100644
--- a/sdk/python/arvados/commands/_util.py
+++ b/sdk/python/arvados/commands/_util.py
@@ -4,12 +4,21 @@
import argparse
import errno
-import os
+import json
import logging
+import os
+import re
import signal
-from future.utils import listitems, listvalues
import sys
+FILTER_STR_RE = re.compile(r'''
+^\(
+\ *(\w+)
+\ *(<|<=|=|>=|>)
+\ *(\w+)
+\ *\)$
+''', re.ASCII | re.VERBOSE)
+
def _pos_int(s):
num = int(s)
if num < 0:
@@ -17,9 +26,9 @@ def _pos_int(s):
return num
retry_opt = argparse.ArgumentParser(add_help=False)
-retry_opt.add_argument('--retries', type=_pos_int, default=3, help="""
+retry_opt.add_argument('--retries', type=_pos_int, default=10, help="""
Maximum number of times to retry server requests that encounter temporary
-failures (e.g., server down). Default 3.""")
+failures (e.g., server down). Default 10.""")
def _ignore_error(error):
return None
@@ -61,5 +70,89 @@ def install_signal_handlers():
for sigcode in CAUGHT_SIGNALS}
def restore_signal_handlers():
- for sigcode, orig_handler in listitems(orig_signal_handlers):
+ for sigcode, orig_handler in orig_signal_handlers.items():
signal.signal(sigcode, orig_handler)
+
+def validate_filters(filters):
+ """Validate user-provided filters
+
+ This function validates that a user-defined object represents valid
+ Arvados filters that can be passed to an API client: that it's a list of
+ 3-element lists with the field name and operator given as strings. If any
+ of these conditions are not true, it raises a ValueError with details about
+ the problem.
+
+ It returns validated filters. Currently the provided filters are returned
+ unmodified. Future versions of this function may clean up the filters with
+ "obvious" type conversions, so callers SHOULD use the returned value for
+ Arvados API calls.
+ """
+ if not isinstance(filters, list):
+ raise ValueError(f"filters are not a list: {filters!r}")
+ for index, f in enumerate(filters):
+ if isinstance(f, str):
+ match = FILTER_STR_RE.fullmatch(f)
+ if match is None:
+ raise ValueError(f"filter at index {index} has invalid syntax: {f!r}")
+ s, op, o = match.groups()
+ if s[0].isdigit():
+ raise ValueError(f"filter at index {index} has invalid syntax: bad field name {s!r}")
+ if o[0].isdigit():
+            raise ValueError(f"filter at index {index} has invalid syntax: bad operand {o!r}")
+ continue
+ elif not isinstance(f, list):
+ raise ValueError(f"filter at index {index} is not a string or list: {f!r}")
+ try:
+ s, op, o = f
+ except ValueError:
+ raise ValueError(
+ f"filter at index {index} does not have three items (field name, operator, operand): {f!r}",
+ ) from None
+ if not isinstance(s, str):
+ raise ValueError(f"filter at index {index} field name is not a string: {s!r}")
+ if not isinstance(op, str):
+ raise ValueError(f"filter at index {index} operator is not a string: {op!r}")
+ return filters
+
+
+class JSONArgument:
+ """Parse a JSON file from a command line argument string or path
+
+ JSONArgument objects can be called with a string and return an arbitrary
+ object. First it will try to decode the string as JSON. If that fails, it
+ will try to open a file at the path named by the string, and decode it as
+ JSON. If that fails, it raises ValueError with more detail.
+
+ This is designed to be used as an argparse argument type.
+ Typical usage looks like:
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--object', type=JSONArgument(), ...)
+
+ You can construct JSONArgument with an optional validation function. If
+ given, it is called with the object decoded from user input, and its
+ return value replaces it. It should raise ValueError if there is a problem
+ with the input. (argparse turns ValueError into a useful error message.)
+
+ filters_type = JSONArgument(validate_filters)
+ parser.add_argument('--filters', type=filters_type, ...)
+ """
+ def __init__(self, validator=None):
+ self.validator = validator
+
+ def __call__(self, value):
+ try:
+ retval = json.loads(value)
+ except json.JSONDecodeError:
+ try:
+ with open(value, 'rb') as json_file:
+ retval = json.load(json_file)
+ except json.JSONDecodeError as error:
+ raise ValueError(f"error decoding JSON from file {value!r}: {error}") from None
+ except (FileNotFoundError, ValueError):
+ raise ValueError(f"not a valid JSON string or file path: {value!r}") from None
+ except OSError as error:
+ raise ValueError(f"error reading JSON file path {value!r}: {error.strerror}") from None
+ if self.validator is not None:
+ retval = self.validator(retval)
+ return retval
diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
index 7951842acc..51251737cf 100755
--- a/sdk/python/arvados/commands/arv_copy.py
+++ b/sdk/python/arvados/commands/arv_copy.py
@@ -30,11 +30,15 @@ import getpass
import os
import re
import shutil
+import subprocess
import sys
import logging
import tempfile
import urllib.parse
import io
+import json
+import queue
+import threading
import arvados
import arvados.config
@@ -42,9 +46,8 @@ import arvados.keep
import arvados.util
import arvados.commands._util as arv_cmd
import arvados.commands.keepdocker
-import ruamel.yaml as yaml
+import arvados.http_to_keep
-from arvados.api import OrderedJsonModel
from arvados._version import __version__
COMMIT_HASH_RE = re.compile(r'^[0-9a-f]{1,40}$')
@@ -105,6 +108,11 @@ def main():
copy_opts.add_argument(
'--storage-classes', dest='storage_classes',
help='Comma separated list of storage classes to be used when saving data to the destinaton Arvados instance.')
+ copy_opts.add_argument("--varying-url-params", type=str, default="",
+ help="A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.")
+
+ copy_opts.add_argument("--prefer-cached-downloads", action="store_true", default=False,
+ help="If a HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).")
copy_opts.add_argument(
'object_uuid',
@@ -125,40 +133,51 @@ def main():
else:
logger.setLevel(logging.INFO)
- if not args.source_arvados:
+ if not args.source_arvados and arvados.util.uuid_pattern.match(args.object_uuid):
args.source_arvados = args.object_uuid[:5]
# Create API clients for the source and destination instances
- src_arv = api_for_instance(args.source_arvados)
- dst_arv = api_for_instance(args.destination_arvados)
+ src_arv = api_for_instance(args.source_arvados, args.retries)
+ dst_arv = api_for_instance(args.destination_arvados, args.retries)
if not args.project_uuid:
args.project_uuid = dst_arv.users().current().execute(num_retries=args.retries)["uuid"]
# Identify the kind of object we have been given, and begin copying.
t = uuid_type(src_arv, args.object_uuid)
- if t == 'Collection':
- set_src_owner_uuid(src_arv.collections(), args.object_uuid, args)
- result = copy_collection(args.object_uuid,
- src_arv, dst_arv,
- args)
- elif t == 'Workflow':
- set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)
- result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)
- elif t == 'Group':
- set_src_owner_uuid(src_arv.groups(), args.object_uuid, args)
- result = copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, args)
- else:
- abort("cannot copy object {} of type {}".format(args.object_uuid, t))
+
+ try:
+ if t == 'Collection':
+ set_src_owner_uuid(src_arv.collections(), args.object_uuid, args)
+ result = copy_collection(args.object_uuid,
+ src_arv, dst_arv,
+ args)
+ elif t == 'Workflow':
+ set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)
+ result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)
+ elif t == 'Group':
+ set_src_owner_uuid(src_arv.groups(), args.object_uuid, args)
+ result = copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, args)
+ elif t == 'httpURL':
+ result = copy_from_http(args.object_uuid, src_arv, dst_arv, args)
+ else:
+ abort("cannot copy object {} of type {}".format(args.object_uuid, t))
+ except Exception as e:
+ logger.error("%s", e, exc_info=args.verbose)
+ exit(1)
# Clean up any outstanding temp git repositories.
for d in listvalues(local_repo_dir):
shutil.rmtree(d, ignore_errors=True)
+ if not result:
+ exit(1)
+
# If no exception was thrown and the response does not have an
# error_token field, presume success
- if 'error_token' in result or 'uuid' not in result:
- logger.error("API server returned an error result: {}".format(result))
+ if result is None or 'error_token' in result or 'uuid' not in result:
+ if result:
+ logger.error("API server returned an error result: {}".format(result))
exit(1)
print(result['uuid'])
@@ -187,10 +206,10 @@ def set_src_owner_uuid(resource, uuid, args):
# Otherwise, it is presumed to be the name of a file in
# $HOME/.config/arvados/instance_name.conf
#
-def api_for_instance(instance_name):
+def api_for_instance(instance_name, num_retries):
if not instance_name:
# Use environment
- return arvados.api('v1', model=OrderedJsonModel())
+ return arvados.api('v1')
if '/' in instance_name:
config_file = instance_name
@@ -214,7 +233,8 @@ def api_for_instance(instance_name):
host=cfg['ARVADOS_API_HOST'],
token=cfg['ARVADOS_API_TOKEN'],
insecure=api_is_insecure,
- model=OrderedJsonModel())
+ num_retries=num_retries,
+ )
else:
abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
return client
@@ -222,8 +242,12 @@ def api_for_instance(instance_name):
# Check if git is available
def check_git_availability():
try:
- arvados.util.run_command(['git', '--help'])
- except Exception:
+ subprocess.run(
+ ['git', '--version'],
+ check=True,
+ stdout=subprocess.DEVNULL,
+ )
+ except FileNotFoundError:
abort('git command is not available. Please ensure git is installed.')
@@ -302,21 +326,26 @@ def copy_workflow(wf_uuid, src, dst, args):
# copy collections and docker images
if args.recursive and wf["definition"]:
- wf_def = yaml.safe_load(wf["definition"])
- if wf_def is not None:
- locations = []
- docker_images = {}
- graph = wf_def.get('$graph', None)
- if graph is not None:
- workflow_collections(graph, locations, docker_images)
- else:
- workflow_collections(wf_def, locations, docker_images)
+ env = {"ARVADOS_API_HOST": urllib.parse.urlparse(src._rootDesc["rootUrl"]).netloc,
+ "ARVADOS_API_TOKEN": src.api_token,
+ "PATH": os.environ["PATH"]}
+ try:
+ result = subprocess.run(["arvados-cwl-runner", "--quiet", "--print-keep-deps", "arvwf:"+wf_uuid],
+ capture_output=True, env=env)
+ except FileNotFoundError:
+ no_arv_copy = True
+ else:
+ no_arv_copy = result.returncode == 2
+
+ if no_arv_copy:
+ raise Exception('Copying workflows requires arvados-cwl-runner 2.7.1 or later to be installed in PATH.')
+ elif result.returncode != 0:
+ raise Exception('There was an error getting Keep dependencies from workflow using arvados-cwl-runner --print-keep-deps')
- if locations:
- copy_collections(locations, src, dst, args)
+ locations = json.loads(result.stdout)
- for image in docker_images:
- copy_docker_image(image, docker_images[image], src, dst, args)
+ if locations:
+ copy_collections(locations, src, dst, args)
# copy the workflow itself
del wf['uuid']
@@ -560,6 +589,125 @@ def copy_collection(obj_uuid, src, dst, args):
else:
progress_writer = None
+ # go through the words
+ # put each block loc into 'get' queue
+ # 'get' threads get block and put it into 'put' queue
+ # 'put' threads put block and then update dst_locators
+ #
+ # after going through the whole manifest we go back through it
+ # again and build dst_manifest
+
+ lock = threading.Lock()
+
+ # the get queue should be unbounded because we'll add all the
+ # block hashes we want to get, but these are small
+ get_queue = queue.Queue()
+
+ threadcount = 4
+
+ # the put queue contains full data blocks
+ # and if 'get' is faster than 'put' we could end up consuming
+ # a great deal of RAM if it isn't bounded.
+ put_queue = queue.Queue(threadcount)
+ transfer_error = []
+
+ def get_thread():
+ while True:
+ word = get_queue.get()
+ if word is None:
+ put_queue.put(None)
+ get_queue.task_done()
+ return
+
+ blockhash = arvados.KeepLocator(word).md5sum
+ with lock:
+ if blockhash in dst_locators:
+ # Already uploaded
+ get_queue.task_done()
+ continue
+
+ try:
+ logger.debug("Getting block %s", word)
+ data = src_keep.get(word)
+ put_queue.put((word, data))
+            except Exception as e:
+ logger.error("Error getting block %s: %s", word, e)
+ transfer_error.append(e)
+ try:
+ # Drain the 'get' queue so we end early
+ while True:
+ get_queue.get(False)
+ get_queue.task_done()
+ except queue.Empty:
+ pass
+ finally:
+ get_queue.task_done()
+
+ def put_thread():
+ nonlocal bytes_written
+ while True:
+ item = put_queue.get()
+ if item is None:
+ put_queue.task_done()
+ return
+
+ word, data = item
+ loc = arvados.KeepLocator(word)
+ blockhash = loc.md5sum
+ with lock:
+ if blockhash in dst_locators:
+ # Already uploaded
+ put_queue.task_done()
+ continue
+
+ try:
+ logger.debug("Putting block %s (%s bytes)", blockhash, loc.size)
+ dst_locator = dst_keep.put(data, classes=(args.storage_classes or []))
+ with lock:
+ dst_locators[blockhash] = dst_locator
+ bytes_written += loc.size
+ if progress_writer:
+ progress_writer.report(obj_uuid, bytes_written, bytes_expected)
+            except Exception as e:
+ logger.error("Error putting block %s (%s bytes): %s", blockhash, loc.size, e)
+ try:
+ # Drain the 'get' queue so we end early
+ while True:
+ get_queue.get(False)
+ get_queue.task_done()
+ except queue.Empty:
+ pass
+ transfer_error.append(e)
+ finally:
+ put_queue.task_done()
+
+ for line in manifest.splitlines():
+ words = line.split()
+ for word in words[1:]:
+ try:
+ loc = arvados.KeepLocator(word)
+ except ValueError:
+ # If 'word' can't be parsed as a locator,
+ # presume it's a filename.
+ continue
+
+ get_queue.put(word)
+
+ for i in range(0, threadcount):
+ get_queue.put(None)
+
+ for i in range(0, threadcount):
+ threading.Thread(target=get_thread, daemon=True).start()
+
+ for i in range(0, threadcount):
+ threading.Thread(target=put_thread, daemon=True).start()
+
+ get_queue.join()
+ put_queue.join()
+
+ if len(transfer_error) > 0:
+ return {"error_token": "Failed to transfer blocks"}
+
for line in manifest.splitlines():
words = line.split()
dst_manifest.write(words[0])
@@ -573,16 +721,6 @@ def copy_collection(obj_uuid, src, dst, args):
dst_manifest.write(word)
continue
blockhash = loc.md5sum
- # copy this block if we haven't seen it before
- # (otherwise, just reuse the existing dst_locator)
- if blockhash not in dst_locators:
- logger.debug("Copying block %s (%s bytes)", blockhash, loc.size)
- if progress_writer:
- progress_writer.report(obj_uuid, bytes_written, bytes_expected)
- data = src_keep.get(word)
- dst_locator = dst_keep.put(data, classes=(args.storage_classes or []))
- dst_locators[blockhash] = dst_locator
- bytes_written += loc.size
dst_manifest.write(' ')
dst_manifest.write(dst_locators[blockhash])
dst_manifest.write("\n")
@@ -610,8 +748,6 @@ def select_git_url(api, repo_name, retries, allow_insecure_http, allow_insecure_
priority = https_url + other_url + http_url
- git_config = []
- git_url = None
for url in priority:
if url.startswith("http"):
u = urllib.parse.urlsplit(url)
@@ -623,17 +759,22 @@ def select_git_url(api, repo_name, retries, allow_insecure_http, allow_insecure_
try:
logger.debug("trying %s", url)
- arvados.util.run_command(["git"] + git_config + ["ls-remote", url],
- env={"HOME": os.environ["HOME"],
- "ARVADOS_API_TOKEN": api.api_token,
- "GIT_ASKPASS": "/bin/false"})
- except arvados.errors.CommandFailedError:
+ subprocess.run(
+ ['git', *git_config, 'ls-remote', url],
+ check=True,
+ env={
+ 'ARVADOS_API_TOKEN': api.api_token,
+ 'GIT_ASKPASS': '/bin/false',
+ 'HOME': os.environ['HOME'],
+ },
+ stdout=subprocess.DEVNULL,
+ )
+ except subprocess.CalledProcessError:
pass
else:
git_url = url
break
-
- if not git_url:
+ else:
raise Exception('Cannot access git repository, tried {}'
.format(priority))
@@ -696,20 +837,20 @@ def copy_project(obj_uuid, src, dst, owner_uuid, args):
# Copy collections
try:
- copy_collections([col["uuid"] for col in arvados.util.list_all(src.collections().list, filters=[["owner_uuid", "=", obj_uuid]])],
+ copy_collections([col["uuid"] for col in arvados.util.keyset_list_all(src.collections().list, filters=[["owner_uuid", "=", obj_uuid]])],
src, dst, args)
except Exception as e:
partial_error += "\n" + str(e)
# Copy workflows
- for w in arvados.util.list_all(src.workflows().list, filters=[["owner_uuid", "=", obj_uuid]]):
+ for w in arvados.util.keyset_list_all(src.workflows().list, filters=[["owner_uuid", "=", obj_uuid]]):
try:
copy_workflow(w["uuid"], src, dst, args)
except Exception as e:
partial_error += "\n" + "Error while copying %s: %s" % (w["uuid"], e)
if args.recursive:
- for g in arvados.util.list_all(src.groups().list, filters=[["owner_uuid", "=", obj_uuid]]):
+ for g in arvados.util.keyset_list_all(src.groups().list, filters=[["owner_uuid", "=", obj_uuid]]):
try:
copy_project(g["uuid"], src, dst, project_record["uuid"], args)
except Exception as e:
@@ -726,9 +867,14 @@ def copy_project(obj_uuid, src, dst, owner_uuid, args):
# repository)
#
def git_rev_parse(rev, repo):
- gitout, giterr = arvados.util.run_command(
- ['git', 'rev-parse', rev], cwd=repo)
- return gitout.strip()
+ proc = subprocess.run(
+ ['git', 'rev-parse', rev],
+ check=True,
+ cwd=repo,
+ stdout=subprocess.PIPE,
+ text=True,
+ )
+ return proc.stdout.strip()
# uuid_type(api, object_uuid)
#
@@ -743,6 +889,10 @@ def git_rev_parse(rev, repo):
def uuid_type(api, object_uuid):
if re.match(arvados.util.keep_locator_pattern, object_uuid):
return 'Collection'
+
+ if object_uuid.startswith("http:") or object_uuid.startswith("https:"):
+ return 'httpURL'
+
p = object_uuid.split('-')
if len(p) == 3:
type_prefix = p[1]
@@ -752,6 +902,27 @@ def uuid_type(api, object_uuid):
return k
return None
+
+def copy_from_http(url, src, dst, args):
+
+ project_uuid = args.project_uuid
+ varying_url_params = args.varying_url_params
+ prefer_cached_downloads = args.prefer_cached_downloads
+
+ cached = arvados.http_to_keep.check_cached_url(src, project_uuid, url, {},
+ varying_url_params=varying_url_params,
+ prefer_cached_downloads=prefer_cached_downloads)
+ if cached[2] is not None:
+ return copy_collection(cached[2], src, dst, args)
+
+ cached = arvados.http_to_keep.http_to_keep(dst, project_uuid, url,
+ varying_url_params=varying_url_params,
+ prefer_cached_downloads=prefer_cached_downloads)
+
+ if cached is not None:
+ return {"uuid": cached[2]}
+
+
def abort(msg, code=1):
logger.info("arv-copy: %s", msg)
exit(code)
diff --git a/sdk/python/arvados/commands/federation_migrate.py b/sdk/python/arvados/commands/federation_migrate.py
index 5c1bb29e76..770e1609db 100755
--- a/sdk/python/arvados/commands/federation_migrate.py
+++ b/sdk/python/arvados/commands/federation_migrate.py
@@ -24,6 +24,7 @@ import os
import hashlib
import re
from arvados._version import __version__
+from . import _util as arv_cmd
EMAIL=0
USERNAME=1
@@ -43,10 +44,10 @@ def connect_clusters(args):
host = r[0]
token = r[1]
print("Contacting %s" % (host))
- arv = arvados.api(host=host, token=token, cache=False)
+ arv = arvados.api(host=host, token=token, cache=False, num_retries=args.retries)
clusters[arv._rootDesc["uuidPrefix"]] = arv
else:
- arv = arvados.api(cache=False)
+ arv = arvados.api(cache=False, num_retries=args.retries)
rh = arv._rootDesc["remoteHosts"]
tok = arv.api_client_authorizations().current().execute()
token = "v2/%s/%s" % (tok["uuid"], tok["api_token"])
@@ -96,13 +97,12 @@ def fetch_users(clusters, loginCluster):
by_email = {}
by_username = {}
- users = []
- for c, arv in clusters.items():
- print("Getting user list from %s" % c)
- ul = arvados.util.list_all(arv.users().list, bypass_federation=True)
- for l in ul:
- if l["uuid"].startswith(c):
- users.append(l)
+ users = [
+ user
+ for prefix, arv in clusters.items()
+ for user in arvados.util.keyset_list_all(arv.users().list, bypass_federation=True)
+ if user['uuid'].startswith(prefix)
+ ]
# Users list is sorted by email
# Go through users and collect users with same email
@@ -110,7 +110,7 @@ def fetch_users(clusters, loginCluster):
# call add_accum_rows() to generate the report rows with
# the "home cluster" set, and also fill in the by_email table.
- users = sorted(users, key=lambda u: u["email"]+"::"+(u["username"] or "")+"::"+u["uuid"])
+ users.sort(key=lambda u: (u["email"], u["username"] or "", u["uuid"]))
accum = []
lastemail = None
@@ -326,7 +326,10 @@ def migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid):
def main():
- parser = argparse.ArgumentParser(description='Migrate users to federated identity, see https://doc.arvados.org/admin/merge-remote-account.html')
+ parser = argparse.ArgumentParser(
+ description='Migrate users to federated identity, see https://doc.arvados.org/admin/merge-remote-account.html',
+ parents=[arv_cmd.retry_opt],
+ )
parser.add_argument(
'--version', action='version', version="%s %s" % (sys.argv[0], __version__),
help='Print version and exit.')
diff --git a/sdk/python/arvados/commands/get.py b/sdk/python/arvados/commands/get.py
index bb421def61..b37a8477ac 100755
--- a/sdk/python/arvados/commands/get.py
+++ b/sdk/python/arvados/commands/get.py
@@ -6,6 +6,7 @@
import argparse
import hashlib
import os
+import pathlib
import re
import string
import sys
@@ -155,7 +156,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
request_id = arvados.util.new_request_id()
logger.info('X-Request-Id: '+request_id)
- api_client = arvados.api('v1', request_id=request_id)
+ api_client = arvados.api('v1', request_id=request_id, num_retries=args.retries)
r = re.search(r'^(.*?)(/.*)?$', args.locator)
col_loc = r.group(1)
@@ -197,8 +198,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
try:
reader = arvados.CollectionReader(
col_loc, api_client=api_client, num_retries=args.retries,
- keep_client=arvados.keep.KeepClient(block_cache=arvados.keep.KeepBlockCache((args.threads+1)*64 * 1024 * 1024)),
- get_threads=args.threads)
+ keep_client=arvados.keep.KeepClient(block_cache=arvados.keep.KeepBlockCache((args.threads+1)*64 * 1024 * 1024), num_prefetch_threads=args.threads))
except Exception as error:
logger.error("failed to read collection: {}".format(error))
return 1
@@ -262,7 +262,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
logger.error('Local file %s already exists.' % (outfilename,))
return 1
if args.r:
- arvados.util.mkdir_dash_p(os.path.dirname(outfilename))
+ pathlib.Path(outfilename).parent.mkdir(parents=True, exist_ok=True)
try:
outfile = open(outfilename, 'wb')
except Exception as error:
diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
index db4edd2dfa..6823ee1bea 100644
--- a/sdk/python/arvados/commands/keepdocker.py
+++ b/sdk/python/arvados/commands/keepdocker.py
@@ -2,37 +2,29 @@
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import next
import argparse
import collections
import datetime
import errno
+import fcntl
import json
+import logging
import os
import re
+import subprocess
import sys
import tarfile
import tempfile
-import shutil
-import _strptime
-import fcntl
+
+import ciso8601
from operator import itemgetter
from stat import *
-if os.name == "posix" and sys.version_info[0] < 3:
- import subprocess32 as subprocess
-else:
- import subprocess
-
import arvados
+import arvados.config
import arvados.util
import arvados.commands._util as arv_cmd
import arvados.commands.put as arv_put
-from arvados.collection import CollectionReader
-import ciso8601
-import logging
-import arvados.config
-
from arvados._version import __version__
logger = logging.getLogger('arvados.keepdocker')
@@ -240,8 +232,9 @@ def docker_link_sort_key(link):
return (image_timestamp, created_timestamp)
def _get_docker_links(api_client, num_retries, **kwargs):
- links = arvados.util.list_all(api_client.links().list,
- num_retries, **kwargs)
+ links = list(arvados.util.keyset_list_all(
+ api_client.links().list, num_retries=num_retries, **kwargs,
+ ))
for link in links:
link['_sort_key'] = docker_link_sort_key(link)
links.sort(key=itemgetter('_sort_key'), reverse=True)
@@ -326,7 +319,7 @@ def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None,
dockerhash = hash_link_map[collection_uuid]['name']
except KeyError:
dockerhash = ''
- name_parts = link['name'].split(':', 1)
+ name_parts = link['name'].rsplit(':', 1)
images.append(_new_image_listing(link, dockerhash, *name_parts))
# Find any image hash links that did not have a corresponding name link,
@@ -340,10 +333,12 @@ def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None,
images.sort(key=itemgetter('_sort_key'), reverse=True)
# Remove any image listings that refer to unknown collections.
- existing_coll_uuids = {coll['uuid'] for coll in arvados.util.list_all(
- api_client.collections().list, num_retries,
- filters=[['uuid', 'in', [im['collection'] for im in images]]]+project_filter,
- select=['uuid'])}
+ existing_coll_uuids = {coll['uuid'] for coll in arvados.util.keyset_list_all(
+ api_client.collections().list,
+ num_retries=num_retries,
+ filters=[['uuid', 'in', [im['collection'] for im in images]]]+project_filter,
+ select=['uuid'],
+ )}
return [(image['collection'], image) for image in images
if image['collection'] in existing_coll_uuids]
@@ -356,10 +351,29 @@ def _uuid2pdh(api, uuid):
select=['portable_data_hash'],
).execute()['items'][0]['portable_data_hash']
+def load_image_metadata(image_file):
+ """Load an image manifest and config from an archive
+
+ Given an image archive as an open binary file object, this function loads
+ the image manifest and configuration, deserializing each from JSON and
+ returning them in a 2-tuple of dicts.
+ """
+ image_file.seek(0)
+ with tarfile.open(fileobj=image_file) as image_tar:
+ with image_tar.extractfile('manifest.json') as manifest_file:
+ image_manifest_list = json.load(manifest_file)
+ # Because arv-keepdocker only saves one image, there should only be
+ # one manifest. This extracts that from the list and raises
+ # ValueError if there's not exactly one.
+ image_manifest, = image_manifest_list
+ with image_tar.extractfile(image_manifest['Config']) as config_file:
+ image_config = json.load(config_file)
+ return image_manifest, image_config
+
def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None):
args = arg_parser.parse_args(arguments)
if api is None:
- api = arvados.api('v1')
+ api = arvados.api('v1', num_retries=args.retries)
if args.image is None or args.image == 'images':
fmt = "{:30} {:10} {:12} {:29} {:20}\n"
@@ -386,6 +400,16 @@ def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None)
elif args.tag is None:
args.tag = 'latest'
+ if '/' in args.image:
+ hostport, path = args.image.split('/', 1)
+ if hostport.endswith(':443'):
+ # "docker pull host:443/asdf" transparently removes the
+ # :443 (which is redundant because https is implied) and
+ # after it succeeds "docker images" will list "host/asdf",
+ # not "host:443/asdf". If we strip the :443 then the name
+ # doesn't change underneath us.
+ args.image = '/'.join([hostport[:-4], path])
+
# Pull the image if requested, unless the image is specified as a hash
# that we already have.
if args.pull and not find_image_hashes(args.image):
@@ -522,21 +546,9 @@ def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None)
# Managed properties could be already set
coll_properties = api.collections().get(uuid=coll_uuid).execute(num_retries=args.retries).get('properties', {})
coll_properties.update({"docker-image-repo-tag": image_repo_tag})
-
api.collections().update(uuid=coll_uuid, body={"properties": coll_properties}).execute(num_retries=args.retries)
- # Read the image metadata and make Arvados links from it.
- image_file.seek(0)
- image_tar = tarfile.open(fileobj=image_file)
- image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
- if image_hash_type:
- json_filename = raw_image_hash + '.json'
- else:
- json_filename = raw_image_hash + '/json'
- json_file = image_tar.extractfile(image_tar.getmember(json_filename))
- image_metadata = json.loads(json_file.read().decode('utf-8'))
- json_file.close()
- image_tar.close()
+ _, image_metadata = load_image_metadata(image_file)
link_base = {'head_uuid': coll_uuid, 'properties': {}}
if 'created' in image_metadata:
link_base['properties']['image_timestamp'] = image_metadata['created']
diff --git a/sdk/python/arvados/commands/ls.py b/sdk/python/arvados/commands/ls.py
index 86e728ed49..ac038f5040 100644
--- a/sdk/python/arvados/commands/ls.py
+++ b/sdk/python/arvados/commands/ls.py
@@ -43,7 +43,7 @@ def main(args, stdout, stderr, api_client=None, logger=None):
args = parse_args(args)
if api_client is None:
- api_client = arvados.api('v1')
+ api_client = arvados.api('v1', num_retries=args.retries)
if logger is None:
logger = logging.getLogger('arvados.arv-ls')
diff --git a/sdk/python/arvados/commands/migrate19.py b/sdk/python/arvados/commands/migrate19.py
index 3ce47b2066..2fef419ee8 100644
--- a/sdk/python/arvados/commands/migrate19.py
+++ b/sdk/python/arvados/commands/migrate19.py
@@ -18,6 +18,7 @@ import arvados
import arvados.commands.keepdocker
from arvados._version import __version__
from arvados.collection import CollectionReader
+from .. import util
logger = logging.getLogger('arvados.migrate-docker19')
logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
@@ -29,6 +30,7 @@ _migration_link_name = 'migrate_1.9_1.10'
class MigrationFailed(Exception):
pass
+@util._deprecated('3.0')
def main(arguments=None):
"""Docker image format migration tool for Arvados.
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index be7cd629c9..0e732eafde 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -1136,7 +1136,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
logging.getLogger('arvados').handlers[0].setFormatter(formatter)
if api_client is None:
- api_client = arvados.api('v1', request_id=request_id)
+ api_client = arvados.api('v1', request_id=request_id, num_retries=args.retries)
if install_sig_handlers:
arv_cmd.install_signal_handlers()
diff --git a/sdk/python/arvados/commands/ws.py b/sdk/python/arvados/commands/ws.py
index 37dab55d60..04a90cf20b 100644
--- a/sdk/python/arvados/commands/ws.py
+++ b/sdk/python/arvados/commands/ws.py
@@ -10,12 +10,13 @@ import arvados
import json
from arvados.events import subscribe
from arvados._version import __version__
+from . import _util as arv_cmd
import signal
def main(arguments=None):
logger = logging.getLogger('arvados.arv-ws')
- parser = argparse.ArgumentParser()
+ parser = argparse.ArgumentParser(parents=[arv_cmd.retry_opt])
parser.add_argument('--version', action='version',
version="%s %s" % (sys.argv[0], __version__),
help='Print version and exit.')
@@ -56,7 +57,7 @@ def main(arguments=None):
filters = new_filters
known_component_jobs = pipeline_jobs
- api = arvados.api('v1')
+ api = arvados.api('v1', num_retries=args.retries)
if args.uuid:
filters += [ ['object_uuid', '=', args.uuid] ]
diff --git a/sdk/python/arvados/config.py b/sdk/python/arvados/config.py
index e17eb1ff57..6f3bd02790 100644
--- a/sdk/python/arvados/config.py
+++ b/sdk/python/arvados/config.py
@@ -38,9 +38,7 @@ def load(config_file):
cfg = {}
with open(config_file, "r") as f:
for config_line in f:
- if re.match('^\s*$', config_line):
- continue
- if re.match('^\s*#', config_line):
+ if re.match(r'^\s*(?:#|$)', config_line):
continue
var, val = config_line.rstrip().split('=', 2)
cfg[var] = val
diff --git a/sdk/python/arvados/crunch.py b/sdk/python/arvados/crunch.py
index 70b8b44033..6dd144c43b 100644
--- a/sdk/python/arvados/crunch.py
+++ b/sdk/python/arvados/crunch.py
@@ -5,6 +5,7 @@
from builtins import object
import json
import os
+from . import util
class TaskOutputDir(object):
"""Keep-backed directory for staging outputs of Crunch tasks.
@@ -21,6 +22,7 @@ class TaskOutputDir(object):
f.write('42')
arvados.current_task().set_output(out.manifest_text())
"""
+ @util._deprecated('3.0', 'arvados-cwl-runner or the containers API')
def __init__(self):
self.path = os.environ['TASK_KEEPMOUNT_TMP']
diff --git a/sdk/python/arvados/diskcache.py b/sdk/python/arvados/diskcache.py
new file mode 100644
index 0000000000..528a7d28b5
--- /dev/null
+++ b/sdk/python/arvados/diskcache.py
@@ -0,0 +1,259 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import threading
+import mmap
+import os
+import traceback
+import stat
+import tempfile
+import fcntl
+import time
+import errno
+import logging
+import weakref
+import collections
+
+_logger = logging.getLogger('arvados.keep')
+
+cacheblock_suffix = ".keepcacheblock"
+
+class DiskCacheSlot(object):
+ __slots__ = ("locator", "ready", "content", "cachedir", "filehandle", "linger")
+
+ def __init__(self, locator, cachedir):
+ self.locator = locator
+ self.ready = threading.Event()
+ self.content = None
+ self.cachedir = cachedir
+ self.filehandle = None
+ self.linger = None
+
+ def get(self):
+ self.ready.wait()
+ # 'content' can be None, an empty byte string, or a nonempty mmap
+ # region. If it is an mmap region, we want to advise the
+ # kernel we're going to use it. This nudges the kernel to
+ # re-read most or all of the block if necessary (instead of
+ # just a few pages at a time), reducing the number of page
+ # faults and improving performance by 4x compared to not
+ # calling madvise.
+ if self.content:
+ self.content.madvise(mmap.MADV_WILLNEED)
+ return self.content
+
+ def set(self, value):
+ tmpfile = None
+ try:
+ if value is None:
+ self.content = None
+ self.ready.set()
+ return False
+
+ if len(value) == 0:
+ # Can't mmap a 0 length file
+ self.content = b''
+ self.ready.set()
+ return True
+
+ if self.content is not None:
+ # Has been set already
+ self.ready.set()
+ return False
+
+ blockdir = os.path.join(self.cachedir, self.locator[0:3])
+ os.makedirs(blockdir, mode=0o700, exist_ok=True)
+
+ final = os.path.join(blockdir, self.locator) + cacheblock_suffix
+
+ self.filehandle = tempfile.NamedTemporaryFile(dir=blockdir, delete=False, prefix="tmp", suffix=cacheblock_suffix)
+ tmpfile = self.filehandle.name
+ os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)
+
+ # acquire a shared lock; this tells other processes that
+ # we're using this block and to please not delete it.
+ fcntl.flock(self.filehandle, fcntl.LOCK_SH)
+
+ self.filehandle.write(value)
+ self.filehandle.flush()
+ os.rename(tmpfile, final)
+ tmpfile = None
+
+ self.content = mmap.mmap(self.filehandle.fileno(), 0, access=mmap.ACCESS_READ)
+ # only set the event when mmap is successful
+ self.ready.set()
+ return True
+ finally:
+ if tmpfile is not None:
+ # If the tempfile hasn't been renamed on disk yet, try to delete it.
+ try:
+ os.remove(tmpfile)
+ except:
+ pass
+
+ def size(self):
+ if self.content is None:
+ if self.linger is not None:
+ # If it is still lingering (object is still accessible
+ # through the weak reference) it is still taking up
+ # space.
+ content = self.linger()
+ if content is not None:
+ return len(content)
+ return 0
+ else:
+ return len(self.content)
+
+ def evict(self):
+ if not self.content:
+ return
+
+ # The mmap region might be in use when we decided to evict
+ # it. This can happen if the cache is too small.
+ #
+ # If we call close() now, it'll throw an error if
+ # something tries to access it.
+ #
+ # However, we don't need to explicitly call mmap.close()
+ #
+ # I confirmed in mmapmodule.c that both close
+ # and deallocate do the same thing:
+ #
+ # a) close the file descriptor
+ # b) unmap the memory range
+ #
+ # So we can forget it in the cache and delete the file on
+ # disk, and it will tear it down after any other
+ # lingering Python references to the mapped memory are
+ # gone.
+
+ blockdir = os.path.join(self.cachedir, self.locator[0:3])
+ final = os.path.join(blockdir, self.locator) + cacheblock_suffix
+ try:
+ fcntl.flock(self.filehandle, fcntl.LOCK_UN)
+
+ # try to get an exclusive lock, this ensures other
+ # processes are not using the block. It is
+ # nonblocking and will throw an exception if we
+ # can't get it, which is fine because that means
+ # we just won't try to delete it.
+ #
+ # I should note here, the file locking is not
+ # strictly necessary, we could just remove it and
+ # the kernel would ensure that the underlying
+ # inode remains available as long as other
+ # processes still have the file open. However, if
+ # you have multiple processes sharing the cache
+ # and deleting each other's files, you'll end up
+ # with a bunch of ghost files that don't show up
+ # in the file system but are still taking up
+ # space, which isn't particularly user friendly.
+ # The locking strategy ensures that cache blocks
+ # in use remain visible.
+ #
+ fcntl.flock(self.filehandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+ os.remove(final)
+ return True
+ except OSError:
+ pass
+ finally:
+ self.filehandle = None
+ self.content = None
+
+ @staticmethod
+ def get_from_disk(locator, cachedir):
+ blockdir = os.path.join(cachedir, locator[0:3])
+ final = os.path.join(blockdir, locator) + cacheblock_suffix
+
+ try:
+ filehandle = open(final, "rb")
+
+ # acquire a shared lock; this tells other processes that
+ # we're using this block and to please not delete it.
+ fcntl.flock(filehandle, fcntl.LOCK_SH)
+
+ content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)
+ dc = DiskCacheSlot(locator, cachedir)
+ dc.filehandle = filehandle
+ dc.content = content
+ dc.ready.set()
+ return dc
+ except FileNotFoundError:
+ pass
+ except Exception as e:
+ traceback.print_exc()
+
+ return None
+
+ @staticmethod
+ def cache_usage(cachedir):
+ usage = 0
+ for root, dirs, files in os.walk(cachedir):
+ for name in files:
+ if not name.endswith(cacheblock_suffix):
+ continue
+
+ blockpath = os.path.join(root, name)
+ res = os.stat(blockpath)
+ usage += res.st_size
+ return usage
+
+
+ @staticmethod
+ def init_cache(cachedir, maxslots):
+ #
+ # First check the disk cache works at all by creating a 1 byte cache entry
+ #
+ checkexists = DiskCacheSlot.get_from_disk('0cc175b9c0f1b6a831c399e269772661', cachedir)
+ ds = DiskCacheSlot('0cc175b9c0f1b6a831c399e269772661', cachedir)
+ ds.set(b'a')
+ if checkexists is None:
+ # Don't keep the test entry around unless it existed beforehand.
+ ds.evict()
+
+ # map in all the files in the cache directory, up to max slots.
+ # after max slots, try to delete the excess blocks.
+ #
+ # this gives the calling process ownership of all the blocks
+
+ blocks = []
+ for root, dirs, files in os.walk(cachedir):
+ for name in files:
+ if not name.endswith(cacheblock_suffix):
+ continue
+
+ blockpath = os.path.join(root, name)
+ res = os.stat(blockpath)
+
+ if len(name) == (32+len(cacheblock_suffix)) and not name.startswith("tmp"):
+ blocks.append((name[0:32], res.st_atime))
+ elif name.startswith("tmp") and ((time.time() - res.st_mtime) > 60):
+ # found a temporary file more than 1 minute old,
+ # try to delete it.
+ try:
+ os.remove(blockpath)
+ except:
+ pass
+
+ # sort by access time (atime), going from most recently
+ # accessed (highest timestamp) to least recently accessed
+ # (lowest timestamp).
+ blocks.sort(key=lambda x: x[1], reverse=True)
+
+ # Map in all the files we found, up to maxslots, if we exceed
+ # maxslots, start throwing things out.
+ cachelist: collections.OrderedDict = collections.OrderedDict()
+ for b in blocks:
+ got = DiskCacheSlot.get_from_disk(b[0], cachedir)
+ if got is None:
+ continue
+ if len(cachelist) < maxslots:
+ cachelist[got.locator] = got
+ else:
+ # we found more blocks than maxslots, try to
+ # throw it out of the cache.
+ got.evict()
+
+ return cachelist
diff --git a/sdk/python/arvados/errors.py b/sdk/python/arvados/errors.py
index 4fe1f76543..15b1f6d4b9 100644
--- a/sdk/python/arvados/errors.py
+++ b/sdk/python/arvados/errors.py
@@ -82,6 +82,8 @@ class KeepReadError(KeepRequestError):
pass
class KeepWriteError(KeepRequestError):
pass
+class KeepCacheError(KeepRequestError):
+ pass
class NotFoundError(KeepReadError):
pass
class NotImplementedError(Exception):
diff --git a/sdk/python/arvados/events.py b/sdk/python/arvados/events.py
index e53e4980a8..88a916e659 100644
--- a/sdk/python/arvados/events.py
+++ b/sdk/python/arvados/events.py
@@ -1,155 +1,322 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Follow events on an Arvados cluster
-from __future__ import absolute_import
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from builtins import object
-import arvados
-from . import config
-from . import errors
-from .retry import RetryLoop
+This module provides different ways to get notified about events that happen
+on an Arvados cluster. You indicate which events you want updates about, and
+provide a function that is called any time one of those events is received
+from the server.
-import logging
+`subscribe` is the main entry point. It helps you construct one of the two
+API-compatible client classes: `EventClient` (which uses WebSockets) or
+`PollClient` (which periodically queries the logs list methods).
+"""
+
+import enum
import json
-import _thread
-import threading
-import time
+import logging
import os
import re
import ssl
-from ws4py.client.threadedclient import WebSocketClient
+import sys
+import _thread
+import threading
+import time
+
+import websockets.exceptions as ws_exc
+import websockets.sync.client as ws_client
+
+from . import config
+from . import errors
+from . import util
+from .retry import RetryLoop
+from ._version import __version__
+
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Union,
+)
+
+EventCallback = Callable[[Dict[str, Any]], object]
+"""Type signature for an event handler callback"""
+FilterCondition = List[Union[None, str, 'Filter']]
+"""Type signature for a single filter condition"""
+Filter = List[FilterCondition]
+"""Type signature for an entire filter"""
_logger = logging.getLogger('arvados.events')
+class WSMethod(enum.Enum):
+ """Arvados WebSocket methods
-class _EventClient(WebSocketClient):
- def __init__(self, url, filters, on_event, last_log_id, on_closed):
- ssl_options = {'ca_certs': arvados.util.ca_certs_path()}
- if config.flag_is_true('ARVADOS_API_HOST_INSECURE'):
- ssl_options['cert_reqs'] = ssl.CERT_NONE
- else:
- ssl_options['cert_reqs'] = ssl.CERT_REQUIRED
+ This enum represents valid values for the `method` field in messages
+ sent to an Arvados WebSocket server.
+ """
+ SUBSCRIBE = 'subscribe'
+ SUB = SUBSCRIBE
+ UNSUBSCRIBE = 'unsubscribe'
+ UNSUB = UNSUBSCRIBE
- # Warning: If the host part of url resolves to both IPv6 and
- # IPv4 addresses (common with "localhost"), only one of them
- # will be attempted -- and it might not be the right one. See
- # ws4py's WebSocketBaseClient.__init__.
- super(_EventClient, self).__init__(url, ssl_options=ssl_options)
- self.filters = filters
- self.on_event = on_event
+class EventClient(threading.Thread):
+ """Follow Arvados events via WebSocket
+
+ EventClient follows events on Arvados cluster published by the WebSocket
+ server. Users can select the events they want to follow and run their own
+ callback function on each.
+ """
+ _USER_AGENT = 'Python/{}.{}.{} arvados.events/{}'.format(
+ *sys.version_info[:3],
+ __version__,
+ )
+
+ def __init__(
+ self,
+ url: str,
+ filters: Optional[Filter],
+ on_event_cb: EventCallback,
+ last_log_id: Optional[int]=None,
+ *,
+ insecure: Optional[bool]=None,
+ ) -> None:
+ """Initialize a WebSocket client
+
+ Constructor arguments:
+
+ * url: str --- The `wss` URL for an Arvados WebSocket server.
+
+ * filters: arvados.events.Filter | None --- One event filter to
+ subscribe to after connecting to the WebSocket server. If not
+ specified, the client will subscribe to all events.
+
+ * on_event_cb: arvados.events.EventCallback --- When the client
+ receives an event from the WebSocket server, it calls this
+ function with the event object.
+
+ * last_log_id: int | None --- If specified, this will be used as the
+ value for the `last_log_id` field in subscribe messages sent by
+ the client.
+
+ Constructor keyword arguments:
+
+ * insecure: bool | None --- If `True`, the client will not check the
+ validity of the server's TLS certificate. If not specified, uses
+ the value from the user's `ARVADOS_API_HOST_INSECURE` setting.
+ """
+ self.url = url
+ self.filters = [filters or []]
+ self.on_event_cb = on_event_cb
self.last_log_id = last_log_id
- self._closing_lock = threading.RLock()
- self._closing = False
- self._closed = threading.Event()
- self.on_closed = on_closed
+ self.is_closed = threading.Event()
+ self._ssl_ctx = ssl.create_default_context(
+ purpose=ssl.Purpose.SERVER_AUTH,
+ cafile=util.ca_certs_path(),
+ )
+ if insecure is None:
+ insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+ if insecure:
+ self._ssl_ctx.check_hostname = False
+ self._ssl_ctx.verify_mode = ssl.CERT_NONE
+ self._subscribe_lock = threading.Lock()
+ self._connect()
+ super().__init__(daemon=True)
+ self.start()
+
+ def _connect(self) -> None:
+ # There are no locks protecting this method. After the thread starts,
+ # it should only be called from inside.
+ self._client = ws_client.connect(
+ self.url,
+ logger=_logger,
+ ssl_context=self._ssl_ctx,
+ user_agent_header=self._USER_AGENT,
+ )
+ self._client_ok = True
+
+ def _subscribe(self, f: Filter, last_log_id: Optional[int]) -> None:
+ extra = {}
+ if last_log_id is not None:
+ extra['last_log_id'] = last_log_id
+ return self._update_sub(WSMethod.SUBSCRIBE, f, **extra)
- def opened(self):
- for f in self.filters:
- self.subscribe(f, self.last_log_id)
+ def _update_sub(self, method: WSMethod, f: Filter, **extra: Any) -> None:
+ msg = json.dumps({
+ 'method': method.value,
+ 'filters': f,
+ **extra,
+ })
+ self._client.send(msg)
- def closed(self, code, reason=None):
- self._closed.set()
- self.on_closed()
+ def close(self, code: int=1000, reason: str='', timeout: float=0) -> None:
+ """Close the WebSocket connection and stop processing events
- def received_message(self, m):
- with self._closing_lock:
- if not self._closing:
- self.on_event(json.loads(str(m)))
+ Arguments:
- def close(self, code=1000, reason='', timeout=0):
- """Close event client and optionally wait for it to finish.
+ * code: int --- The WebSocket close code sent to the server when
+ disconnecting. Default 1000.
- :timeout: is the number of seconds to wait for ws4py to
- indicate that the connection has closed.
+ * reason: str --- The WebSocket close reason sent to the server when
+ disconnecting. Default is an empty string.
+
+ * timeout: float --- How long to wait for the WebSocket server to
+ acknowledge the disconnection, in seconds. Default 0, which means
+ no timeout.
"""
- super(_EventClient, self).close(code, reason)
- with self._closing_lock:
- # make sure we don't process any more messages.
- self._closing = True
- # wait for ws4py to tell us the connection is closed.
- self._closed.wait(timeout=timeout)
+ self.is_closed.set()
+ self._client.close_timeout = timeout or None
+ self._client.close(code, reason)
- def subscribe(self, f, last_log_id=None):
- m = {"method": "subscribe", "filters": f}
- if last_log_id is not None:
- m["last_log_id"] = last_log_id
- self.send(json.dumps(m))
+ def run_forever(self) -> None:
+ """Run the WebSocket client indefinitely
- def unsubscribe(self, f):
- self.send(json.dumps({"method": "unsubscribe", "filters": f}))
+ This method blocks until the `close` method is called (e.g., from
+ another thread) or the client permanently loses its connection.
+ """
+ # Have to poll here to let KeyboardInterrupt get raised.
+ while not self.is_closed.wait(1):
+ pass
+ def subscribe(self, f: Filter, last_log_id: Optional[int]=None) -> None:
+ """Subscribe to another set of events from the server
-class EventClient(object):
- def __init__(self, url, filters, on_event_cb, last_log_id):
- self.url = url
- if filters:
- self.filters = [filters]
- else:
- self.filters = [[]]
- self.on_event_cb = on_event_cb
- self.last_log_id = last_log_id
- self.is_closed = threading.Event()
- self._setup_event_client()
+ Arguments:
- def _setup_event_client(self):
- self.ec = _EventClient(self.url, self.filters, self.on_event,
- self.last_log_id, self.on_closed)
- self.ec.daemon = True
- try:
- self.ec.connect()
- except Exception:
- self.ec.close_connection()
- raise
+ * f: arvados.events.Filter | None --- One filter to subscribe to
+ events for.
- def subscribe(self, f, last_log_id=None):
- self.filters.append(f)
- self.ec.subscribe(f, last_log_id)
+ * last_log_id: int | None --- If specified, request events starting
+ from this id. If not specified, the server will only send events
+ that occur after processing the subscription.
+ """
+ with self._subscribe_lock:
+ self._subscribe(f, last_log_id)
+ self.filters.append(f)
- def unsubscribe(self, f):
- del self.filters[self.filters.index(f)]
- self.ec.unsubscribe(f)
+ def unsubscribe(self, f: Filter) -> None:
+ """Unsubscribe from an event stream
- def close(self, code=1000, reason='', timeout=0):
- self.is_closed.set()
- self.ec.close(code, reason, timeout)
+ Arguments:
+
+ * f: arvados.events.Filter | None --- One event filter to stop
+ receiving events for.
+ """
+ with self._subscribe_lock:
+ try:
+ index = self.filters.index(f)
+ except ValueError:
+ raise ValueError(f"filter not subscribed: {f!r}") from None
+ self._update_sub(WSMethod.UNSUBSCRIBE, f)
+ del self.filters[index]
+
+ def on_closed(self) -> None:
+ """Handle disconnection from the WebSocket server
+
+ This method is called when the client loses its connection from
+ receiving events. This implementation tries to establish a new
+ connection if it was not closed client-side.
+ """
+ if self.is_closed.is_set():
+ return
+ _logger.warning("Unexpected close. Reconnecting.")
+ for _ in RetryLoop(num_retries=25, backoff_start=.1, max_wait=15):
+ try:
+ self._connect()
+ except Exception as e:
+ _logger.warning("Error '%s' during websocket reconnect.", e)
+ else:
+ _logger.warning("Reconnect successful.")
+ break
+ else:
+ _logger.error("EventClient thread could not contact websocket server.")
+ self.is_closed.set()
+ _thread.interrupt_main()
+
+ def on_event(self, m: Dict[str, Any]) -> None:
+ """Handle an event from the WebSocket server
- def on_event(self, m):
- if m.get('id') != None:
- self.last_log_id = m.get('id')
+ This method is called whenever the client receives an event from the
+ server. This implementation records the `id` field internally, then
+ calls the callback function provided at initialization time.
+
+ Arguments:
+
+ * m: Dict[str, Any] --- The event object, deserialized from JSON.
+ """
+ try:
+ self.last_log_id = m['id']
+ except KeyError:
+ pass
try:
self.on_event_cb(m)
- except Exception as e:
+ except Exception:
_logger.exception("Unexpected exception from event callback.")
_thread.interrupt_main()
- def on_closed(self):
- if not self.is_closed.is_set():
- _logger.warning("Unexpected close. Reconnecting.")
- for tries_left in RetryLoop(num_retries=25, backoff_start=.1, max_wait=15):
- try:
- self._setup_event_client()
- _logger.warning("Reconnect successful.")
- break
- except Exception as e:
- _logger.warning("Error '%s' during websocket reconnect.", e)
- if tries_left == 0:
- _logger.exception("EventClient thread could not contact websocket server.")
- self.is_closed.set()
- _thread.interrupt_main()
- return
+ def run(self) -> None:
+ """Run the client loop
- def run_forever(self):
- # Have to poll here to let KeyboardInterrupt get raised.
- while not self.is_closed.wait(1):
- pass
+ This method runs in a separate thread to receive and process events
+ from the server.
+ """
+ self.setName(f'ArvadosWebsockets-{self.ident}')
+ while self._client_ok and not self.is_closed.is_set():
+ try:
+ with self._subscribe_lock:
+ for f in self.filters:
+ self._subscribe(f, self.last_log_id)
+ for msg_s in self._client:
+ if not self.is_closed.is_set():
+ msg = json.loads(msg_s)
+ self.on_event(msg)
+ except ws_exc.ConnectionClosed:
+ self._client_ok = False
+ self.on_closed()
class PollClient(threading.Thread):
- def __init__(self, api, filters, on_event, poll_time, last_log_id):
+ """Follow Arvados events via polling logs
+
+ PollClient follows events on Arvados cluster by periodically running
+ logs list API calls. Users can select the events they want to follow and
+ run their own callback function on each.
+ """
+ def __init__(
+ self,
+ api: 'arvados.api_resources.ArvadosAPIClient',
+ filters: Optional[Filter],
+ on_event: EventCallback,
+ poll_time: float=15,
+ last_log_id: Optional[int]=None,
+ ) -> None:
+ """Initialize a polling client
+
+ Constructor arguments:
+
+ * api: arvados.api_resources.ArvadosAPIClient --- The Arvados API
+ client used to query logs. It will be used in a separate thread,
+ so if it is not an instance of `arvados.safeapi.ThreadSafeApiCache`
+ it should not be reused after the thread is started.
+
+ * filters: arvados.events.Filter | None --- One event filter to
+ subscribe to after connecting to the WebSocket server. If not
+ specified, the client will subscribe to all events.
+
+ * on_event: arvados.events.EventCallback --- When the client
+ receives an event from the WebSocket server, it calls this
+ function with the event object.
+
+ * poll_time: float --- The number of seconds to wait between querying
+ logs. Default 15.
+
+ * last_log_id: int | None --- If specified, queries will include a
+ filter for logs with an `id` at least this value.
+ """
super(PollClient, self).__init__()
self.api = api
if filters:
@@ -174,6 +341,11 @@ class PollClient(threading.Thread):
self._skip_old_events = False
def run(self):
+ """Run the client loop
+
+ This method runs in a separate thread to poll and process events
+ from the server.
+ """
self.on_event({'status': 200})
while not self._closing.is_set():
@@ -262,23 +434,29 @@ class PollClient(threading.Thread):
self._closing.wait(self.poll_time)
def run_forever(self):
+ """Run the polling client indefinitely
+
+ This method blocks until the `close` method is called (e.g., from
+ another thread) or the client permanently loses its connection.
+ """
# Have to poll here, otherwise KeyboardInterrupt will never get processed.
while not self._closing.is_set():
self._closing.wait(1)
- def close(self, code=None, reason=None, timeout=0):
- """Close poll client and optionally wait for it to finish.
+ def close(self, code: Optional[int]=None, reason: Optional[str]=None, timeout: float=0) -> None:
+ """Stop polling and processing events
- If an :on_event: handler is running in a different thread,
- first wait (indefinitely) for it to return.
+ Arguments:
- After closing, wait up to :timeout: seconds for the thread to
- finish the poll request in progress (if any).
+ * code: Optional[int] --- Ignored; this argument exists for API
+ compatibility with `EventClient.close`.
- :code: and :reason: are ignored. They are present for
- interface compatibility with EventClient.
- """
+ * reason: Optional[str] --- Ignored; this argument exists for API
+ compatibility with `EventClient.close`.
+ * timeout: float --- How long to wait for the client thread to finish
+ processing events. Default 0, which means no timeout.
+ """
with self._closing_lock:
self._closing.set()
try:
@@ -290,11 +468,27 @@ class PollClient(threading.Thread):
# to do so raises the same exception."
pass
- def subscribe(self, f):
+ def subscribe(self, f: Filter, last_log_id: Optional[int]=None) -> None:
+ """Subscribe to another set of events from the server
+
+ Arguments:
+
+ * f: arvados.events.Filter | None --- One filter to subscribe to.
+
+ * last_log_id: Optional[int] --- Ignored; this argument exists for
+ API compatibility with `EventClient.subscribe`.
+ """
self.on_event({'status': 200})
self.filters.append(f)
def unsubscribe(self, f):
+ """Unsubscribe from an event stream
+
+ Arguments:
+
+ * f: arvados.events.Filter | None --- One event filter to stop
+ receiving events for.
+ """
del self.filters[self.filters.index(f)]
@@ -312,21 +506,42 @@ def _subscribe_websocket(api, filters, on_event, last_log_id=None):
else:
return client
-
-def subscribe(api, filters, on_event, poll_fallback=15, last_log_id=None):
+def subscribe(
+ api: 'arvados.api_resources.ArvadosAPIClient',
+ filters: Optional[Filter],
+ on_event: EventCallback,
+ poll_fallback: float=15,
+ last_log_id: Optional[int]=None,
+) -> Union[EventClient, PollClient]:
+ """Start a thread to monitor events
+
+ This method tries to construct an `EventClient` to process Arvados
+ events via WebSockets. If that fails, or the
+ `ARVADOS_DISABLE_WEBSOCKETS` flag is set in user configuration, it falls
+ back to constructing a `PollClient` to process the events via API
+ polling.
+
+ Arguments:
+
+ * api: arvados.api_resources.ArvadosAPIClient --- The Arvados API
+ client used to query logs. It may be used in a separate thread,
+ so if it is not an instance of `arvados.safeapi.ThreadSafeApiCache`
+ it should not be reused after this method returns.
+
+ * filters: arvados.events.Filter | None --- One event filter to
+ subscribe to after initializing the client. If not specified, the
+ client will subscribe to all events.
+
+ * on_event: arvados.events.EventCallback --- When the client receives an
+ event, it calls this function with the event object.
+
+    * poll_fallback: float --- The number of seconds a `PollClient` will
+      wait between querying logs. If 0, this function will refuse to
+      construct a `PollClient`. Default 15.
+
+ * last_log_id: int | None --- If specified, start processing events with
+ at least this `id` value.
"""
- :api:
- a client object retrieved from arvados.api(). The caller should not use this client object for anything else after calling subscribe().
- :filters:
- Initial subscription filters.
- :on_event:
- The callback when a message is received.
- :poll_fallback:
- If websockets are not available, fall back to polling every N seconds. If poll_fallback=False, this will return None if websockets are not available.
- :last_log_id:
- Log rows that are newer than the log id
- """
-
if not poll_fallback:
return _subscribe_websocket(api, filters, on_event, last_log_id)
diff --git a/sdk/python/arvados/http_to_keep.py b/sdk/python/arvados/http_to_keep.py
new file mode 100644
index 0000000000..f247afeaff
--- /dev/null
+++ b/sdk/python/arvados/http_to_keep.py
@@ -0,0 +1,374 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import calendar
+import dataclasses
+import datetime
+import email.utils
+import logging
+import re
+import time
+import typing
+import urllib.parse
+
+import pycurl
+
+import arvados
+import arvados.collection
+from arvados._pycurlhelper import PyCurlHelper
+
+logger = logging.getLogger('arvados.http_import')
+
+def _my_formatdate(dt):
+ return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
+ localtime=False, usegmt=True)
+
+def _my_parsedate(text):
+ parsed = email.utils.parsedate_tz(text)
+ if parsed:
+ if parsed[9]:
+ # Adjust to UTC
+ return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
+ else:
+ # TZ is zero or missing, assume UTC.
+ return datetime.datetime(*parsed[:6])
+ else:
+ return datetime.datetime(1970, 1, 1)
+
+def _fresh_cache(url, properties, now):
+ pr = properties[url]
+ expires = None
+
+ logger.debug("Checking cache freshness for %s using %s", url, pr)
+
+ if "Cache-Control" in pr:
+ if re.match(r"immutable", pr["Cache-Control"]):
+ return True
+
+ g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
+ if g:
+ expires = _my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
+
+ if expires is None and "Expires" in pr:
+ expires = _my_parsedate(pr["Expires"])
+
+ if expires is None:
+ # Use a default cache time of 24 hours if upstream didn't set
+ # any cache headers, to reduce redundant downloads.
+ expires = _my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
+
+ if not expires:
+ return False
+
+ return (now < expires)
+
+def _remember_headers(url, properties, headers, now):
+ properties.setdefault(url, {})
+ for h in ("Cache-Control", "Etag", "Expires", "Date", "Content-Length"):
+ if h in headers:
+ properties[url][h] = headers[h]
+ if "Date" not in headers:
+ properties[url]["Date"] = _my_formatdate(now)
+
+@dataclasses.dataclass
+class _Response:
+ status_code: int
+ headers: typing.Mapping[str, str]
+
+
+class _Downloader(PyCurlHelper):
+ # Wait up to 60 seconds for connection
+ # How long it can be in "low bandwidth" state before it gives up
+ # Low bandwidth threshold is 32 KiB/s
+ DOWNLOADER_TIMEOUT = (60, 300, 32768)
+
+ def __init__(self, apiclient):
+ super(_Downloader, self).__init__(title_case_headers=True)
+ self.curl = pycurl.Curl()
+ self.curl.setopt(pycurl.NOSIGNAL, 1)
+ self.curl.setopt(pycurl.OPENSOCKETFUNCTION,
+ lambda *args, **kwargs: self._socket_open(*args, **kwargs))
+ self.target = None
+ self.apiclient = apiclient
+
+ def head(self, url):
+ get_headers = {'Accept': 'application/octet-stream'}
+ self._headers = {}
+
+ self.curl.setopt(pycurl.URL, url.encode('utf-8'))
+ self.curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in get_headers.items()])
+
+ self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
+ self.curl.setopt(pycurl.NOBODY, True)
+ self.curl.setopt(pycurl.FOLLOWLOCATION, True)
+
+ self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, True)
+
+ try:
+ self.curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ finally:
+ if self._socket:
+ self._socket.close()
+ self._socket = None
+
+ return _Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
+
+ def download(self, url, headers):
+ self.count = 0
+ self.start = time.time()
+ self.checkpoint = self.start
+ self._headers = {}
+ self._first_chunk = True
+ self.collection = None
+ self.parsedurl = urllib.parse.urlparse(url)
+
+ get_headers = {'Accept': 'application/octet-stream'}
+ get_headers.update(headers)
+
+ self.curl.setopt(pycurl.URL, url.encode('utf-8'))
+ self.curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in get_headers.items()])
+
+ self.curl.setopt(pycurl.WRITEFUNCTION, self.body_write)
+ self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+
+ self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
+ self.curl.setopt(pycurl.HTTPGET, True)
+ self.curl.setopt(pycurl.FOLLOWLOCATION, True)
+
+ self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, False)
+
+ try:
+ self.curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ finally:
+ if self._socket:
+ self._socket.close()
+ self._socket = None
+
+ return _Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)
+
+ def headers_received(self):
+ self.collection = arvados.collection.Collection(api_client=self.apiclient)
+
+ if "Content-Length" in self._headers:
+ self.contentlength = int(self._headers["Content-Length"])
+ logger.info("File size is %s bytes", self.contentlength)
+ else:
+ self.contentlength = None
+
+ if self._headers.get("Content-Disposition"):
+ grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))',
+ self._headers["Content-Disposition"])
+ if grp.group(2):
+ self.name = grp.group(2)
+ else:
+ self.name = grp.group(4)
+ else:
+ self.name = self.parsedurl.path.split("/")[-1]
+
+ # Can't call curl.getinfo(pycurl.RESPONSE_CODE) until
+ # perform() is done but we need to know the status before that
+ # so we have to parse the status line ourselves.
+ mt = re.match(r'^HTTP\/(\d(\.\d)?) ([1-5]\d\d) ([^\r\n\x00-\x08\x0b\x0c\x0e-\x1f\x7f]*)\r\n$', self._headers["x-status-line"])
+ code = int(mt.group(3))
+
+ if not self.name:
+ logger.error("Cannot determine filename from URL or headers")
+ return
+
+ if code == 200:
+ self.target = self.collection.open(self.name, "wb")
+
+ def body_write(self, chunk):
+ if self._first_chunk:
+ self.headers_received()
+ self._first_chunk = False
+
+ self.count += len(chunk)
+
+ if self.target is None:
+ # "If this number is not equal to the size of the byte
+ # string, this signifies an error and libcurl will abort
+ # the request."
+ return 0
+
+ self.target.write(chunk)
+ loopnow = time.time()
+ if (loopnow - self.checkpoint) < 20:
+ return
+
+ bps = self.count / (loopnow - self.start)
+ if self.contentlength is not None:
+ logger.info("%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left",
+ ((self.count * 100) / self.contentlength),
+ (bps / (1024.0*1024.0)),
+ ((self.contentlength-self.count) // bps))
+ else:
+            logger.info("%d downloaded, %6.2f MiB/s", self.count, (bps / (1024.0*1024.0)))
+ self.checkpoint = loopnow
+
+
+def _changed(url, clean_url, properties, now, curldownloader):
+ req = curldownloader.head(url)
+
+ if req.status_code != 200:
+ # Sometimes endpoints are misconfigured and will deny HEAD but
+ # allow GET so instead of failing here, we'll try GET If-None-Match
+ return True
+
+ # previous version of this code used "ETag", now we are
+ # normalizing to "Etag", check for both.
+ etag = properties[url].get("Etag") or properties[url].get("ETag")
+
+ if url in properties:
+ del properties[url]
+ _remember_headers(clean_url, properties, req.headers, now)
+
+ if "Etag" in req.headers and etag == req.headers["Etag"]:
+ # Didn't change
+ return False
+
+ return True
+
+def _etag_quote(etag):
+ # if it already has leading and trailing quotes, do nothing
+ if etag[0] == '"' and etag[-1] == '"':
+ return etag
+ else:
+ # Add quotes.
+ return '"' + etag + '"'
+
+
+def check_cached_url(api, project_uuid, url, etags,
+ utcnow=datetime.datetime.utcnow,
+ varying_url_params="",
+ prefer_cached_downloads=False):
+
+ logger.info("Checking Keep for %s", url)
+
+ varying_params = [s.strip() for s in varying_url_params.split(",")]
+
+ parsed = urllib.parse.urlparse(url)
+ query = [q for q in urllib.parse.parse_qsl(parsed.query)
+ if q[0] not in varying_params]
+
+ clean_url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params,
+ urllib.parse.urlencode(query, safe="/"), parsed.fragment))
+
+ r1 = api.collections().list(filters=[["properties", "exists", url]]).execute()
+
+ if clean_url == url:
+ items = r1["items"]
+ else:
+ r2 = api.collections().list(filters=[["properties", "exists", clean_url]]).execute()
+ items = r1["items"] + r2["items"]
+
+ now = utcnow()
+
+ curldownloader = _Downloader(api)
+
+ for item in items:
+ properties = item["properties"]
+
+ if clean_url in properties:
+ cache_url = clean_url
+ elif url in properties:
+ cache_url = url
+ else:
+ raise Exception("Shouldn't happen, got an API result for %s that doesn't have the URL in properties" % item["uuid"])
+
+ if prefer_cached_downloads or _fresh_cache(cache_url, properties, now):
+ # HTTP caching rules say we should use the cache
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+ return (item["portable_data_hash"], next(iter(cr.keys())), item["uuid"], clean_url, now)
+
+ if not _changed(cache_url, clean_url, properties, now, curldownloader):
+ # Etag didn't change, same content, just update headers
+ api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+ return (item["portable_data_hash"], next(iter(cr.keys())), item["uuid"], clean_url, now)
+
+ for etagstr in ("Etag", "ETag"):
+ if etagstr in properties[cache_url] and len(properties[cache_url][etagstr]) > 2:
+ etags[properties[cache_url][etagstr]] = item
+
+ logger.debug("Found ETag values %s", etags)
+
+ return (None, None, None, clean_url, now)
+
+
+def http_to_keep(api, project_uuid, url,
+ utcnow=datetime.datetime.utcnow, varying_url_params="",
+ prefer_cached_downloads=False):
+ """Download a file over HTTP and upload it to keep, with HTTP headers as metadata.
+
+ Before downloading the URL, checks to see if the URL already
+ exists in Keep and applies HTTP caching policy, the
+ varying_url_params and prefer_cached_downloads flags in order to
+ decide whether to use the version in Keep or re-download it.
+ """
+
+ etags = {}
+ cache_result = check_cached_url(api, project_uuid, url, etags,
+ utcnow, varying_url_params,
+ prefer_cached_downloads)
+
+ if cache_result[0] is not None:
+ return cache_result
+
+ clean_url = cache_result[3]
+ now = cache_result[4]
+
+ properties = {}
+ headers = {}
+ if etags:
+ headers['If-None-Match'] = ', '.join([_etag_quote(k) for k,v in etags.items()])
+ logger.debug("Sending GET request with headers %s", headers)
+
+ logger.info("Beginning download of %s", url)
+
+ curldownloader = _Downloader(api)
+
+ req = curldownloader.download(url, headers)
+
+ c = curldownloader.collection
+
+ if req.status_code not in (200, 304):
+ raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
+
+ if curldownloader.target is not None:
+ curldownloader.target.close()
+
+ _remember_headers(clean_url, properties, req.headers, now)
+
+ if req.status_code == 304 and "Etag" in req.headers and req.headers["Etag"] in etags:
+ item = etags[req.headers["Etag"]]
+ item["properties"].update(properties)
+ api.collections().update(uuid=item["uuid"], body={"collection":{"properties": item["properties"]}}).execute()
+ cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+ return (item["portable_data_hash"], list(cr.keys())[0], item["uuid"], clean_url, now)
+
+ logger.info("Download complete")
+
+ collectionname = "Downloaded from %s" % urllib.parse.quote(clean_url, safe='')
+
+ # max length - space to add a timestamp used by ensure_unique_name
+ max_name_len = 254 - 28
+
+ if len(collectionname) > max_name_len:
+ over = len(collectionname) - max_name_len
+ split = int(max_name_len/2)
+        collectionname = collectionname[0:split] + "…" + collectionname[split+over:]
+
+ c.save_new(name=collectionname, owner_uuid=project_uuid, ensure_unique_name=True)
+
+ api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
+
+ return (c.portable_data_hash(), curldownloader.name, c.manifest_locator(), clean_url, now)
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
index 7c05cc0a6a..d1be6b931e 100644
--- a/sdk/python/arvados/keep.py
+++ b/sdk/python/arvados/keep.py
@@ -15,6 +15,7 @@ from builtins import object
import collections
import datetime
import hashlib
+import errno
import io
import logging
import math
@@ -26,8 +27,11 @@ import socket
import ssl
import sys
import threading
+import resource
from . import timer
import urllib.parse
+import traceback
+import weakref
if sys.version_info >= (3, 0):
from io import BytesIO
@@ -39,6 +43,8 @@ import arvados.config as config
import arvados.errors
import arvados.retry as retry
import arvados.util
+import arvados.diskcache
+from arvados._pycurlhelper import PyCurlHelper
_logger = logging.getLogger('arvados.keep')
global_client_object = None
@@ -159,7 +165,6 @@ class Keep(object):
config.get('ARVADOS_API_TOKEN'),
config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
config.get('ARVADOS_KEEP_PROXY'),
- config.get('ARVADOS_EXTERNAL_CLIENT') == 'true',
os.environ.get('KEEP_LOCAL_STORE'))
if (global_client_object is None) or (cls._last_key != key):
global_client_object = KeepClient()
@@ -175,11 +180,65 @@ class Keep(object):
return Keep.global_client_object().put(data, **kwargs)
class KeepBlockCache(object):
- # Default RAM cache is 256MiB
- def __init__(self, cache_max=(256 * 1024 * 1024)):
+ def __init__(self, cache_max=0, max_slots=0, disk_cache=False, disk_cache_dir=None):
self.cache_max = cache_max
- self._cache = []
+ self._cache = collections.OrderedDict()
self._cache_lock = threading.Lock()
+ self._max_slots = max_slots
+ self._disk_cache = disk_cache
+ self._disk_cache_dir = disk_cache_dir
+ self._cache_updating = threading.Condition(self._cache_lock)
+
+ if self._disk_cache and self._disk_cache_dir is None:
+ self._disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
+ os.makedirs(self._disk_cache_dir, mode=0o700, exist_ok=True)
+
+ if self._max_slots == 0:
+ if self._disk_cache:
+ # Each block uses two file descriptors, one used to
+ # open it initially and hold the flock(), and a second
+ # hidden one used by mmap().
+ #
+ # Set max slots to 1/8 of maximum file handles. This
+ # means we'll use at most 1/4 of total file handles.
+ #
+ # NOFILE typically defaults to 1024 on Linux so this
+ # is 128 slots (256 file handles), which means we can
+ # cache up to 8 GiB of 64 MiB blocks. This leaves
+ # 768 file handles for sockets and other stuff.
+ #
+ # When we want the ability to have more cache (e.g. in
+ # arv-mount) we'll increase rlimit before calling
+ # this.
+ self._max_slots = int(resource.getrlimit(resource.RLIMIT_NOFILE)[0] / 8)
+ else:
+ # RAM cache slots
+ self._max_slots = 512
+
+ if self.cache_max == 0:
+ if self._disk_cache:
+ fs = os.statvfs(self._disk_cache_dir)
+ # Calculation of available space incorporates existing cache usage
+ existing_usage = arvados.diskcache.DiskCacheSlot.cache_usage(self._disk_cache_dir)
+ avail = (fs.f_bavail * fs.f_bsize + existing_usage) / 4
+ maxdisk = int((fs.f_blocks * fs.f_bsize) * 0.10)
+ # pick smallest of:
+ # 10% of total disk size
+ # 25% of available space
+ # max_slots * 64 MiB
+ self.cache_max = min(min(maxdisk, avail), (self._max_slots * 64 * 1024 * 1024))
+ else:
+ # 256 MiB in RAM
+ self.cache_max = (256 * 1024 * 1024)
+
+ self.cache_max = max(self.cache_max, 64 * 1024 * 1024)
+
+ self.cache_total = 0
+ if self._disk_cache:
+ self._cache = arvados.diskcache.DiskCacheSlot.init_cache(self._disk_cache_dir, self._max_slots)
+ for slot in self._cache.values():
+ self.cache_total += slot.size()
+ self.cap_cache()
class CacheSlot(object):
__slots__ = ("locator", "ready", "content")
@@ -194,8 +253,11 @@ class KeepBlockCache(object):
return self.content
def set(self, value):
+ if self.content is not None:
+ return False
self.content = value
self.ready.set()
+ return True
def size(self):
if self.content is None:
@@ -203,29 +265,50 @@ class KeepBlockCache(object):
else:
return len(self.content)
+ def evict(self):
+ self.content = None
+
+
+ def _resize_cache(self, cache_max, max_slots):
+ # Try and make sure the contents of the cache do not exceed
+ # the supplied maximums.
+
+ if self.cache_total <= cache_max and len(self._cache) <= max_slots:
+ return
+
+ _evict_candidates = collections.deque(self._cache.values())
+ while _evict_candidates and (self.cache_total > cache_max or len(self._cache) > max_slots):
+ slot = _evict_candidates.popleft()
+ if not slot.ready.is_set():
+ continue
+
+ sz = slot.size()
+ slot.evict()
+ self.cache_total -= sz
+ del self._cache[slot.locator]
+
+
def cap_cache(self):
'''Cap the cache size to self.cache_max'''
- with self._cache_lock:
- # Select all slots except those where ready.is_set() and content is
- # None (that means there was an error reading the block).
- self._cache = [c for c in self._cache if not (c.ready.is_set() and c.content is None)]
- sm = sum([slot.size() for slot in self._cache])
- while len(self._cache) > 0 and sm > self.cache_max:
- for i in range(len(self._cache)-1, -1, -1):
- if self._cache[i].ready.is_set():
- del self._cache[i]
- break
- sm = sum([slot.size() for slot in self._cache])
+ with self._cache_updating:
+ self._resize_cache(self.cache_max, self._max_slots)
+ self._cache_updating.notify_all()
def _get(self, locator):
# Test if the locator is already in the cache
- for i in range(0, len(self._cache)):
- if self._cache[i].locator == locator:
- n = self._cache[i]
- if i != 0:
- # move it to the front
- del self._cache[i]
- self._cache.insert(0, n)
+ if locator in self._cache:
+ n = self._cache[locator]
+ if n.ready.is_set() and n.content is None:
+ del self._cache[n.locator]
+ return None
+ self._cache.move_to_end(locator)
+ return n
+ if self._disk_cache:
+ # see if it exists on disk
+ n = arvados.diskcache.DiskCacheSlot.get_from_disk(locator, self._disk_cache_dir)
+ if n is not None:
+ self._cache[n.locator] = n
+ self.cache_total += n.size()
return n
return None
@@ -236,16 +319,68 @@ class KeepBlockCache(object):
def reserve_cache(self, locator):
'''Reserve a cache slot for the specified locator,
or return the existing slot.'''
- with self._cache_lock:
+ with self._cache_updating:
n = self._get(locator)
if n:
return n, False
else:
# Add a new cache slot for the locator
- n = KeepBlockCache.CacheSlot(locator)
- self._cache.insert(0, n)
+ self._resize_cache(self.cache_max, self._max_slots-1)
+ while len(self._cache) >= self._max_slots:
+ # If there isn't a slot available, need to wait
+ # for something to happen that releases one of the
+ # cache slots. Idle for 200 ms or woken up by
+ # another thread
+ self._cache_updating.wait(timeout=0.2)
+ self._resize_cache(self.cache_max, self._max_slots-1)
+
+ if self._disk_cache:
+ n = arvados.diskcache.DiskCacheSlot(locator, self._disk_cache_dir)
+ else:
+ n = KeepBlockCache.CacheSlot(locator)
+ self._cache[n.locator] = n
return n, True
+ def set(self, slot, blob):
+ try:
+ if slot.set(blob):
+ self.cache_total += slot.size()
+ return
+ except OSError as e:
+ if e.errno == errno.ENOMEM:
+ # Reduce max slots to current - 4, cap cache and retry
+ with self._cache_lock:
+ self._max_slots = max(4, len(self._cache) - 4)
+ elif e.errno == errno.ENOSPC:
+ # Reduce disk max space to current - 256 MiB, cap cache and retry
+ with self._cache_lock:
+ sm = sum(st.size() for st in self._cache.values())
+ self.cache_max = max((256 * 1024 * 1024), sm - (256 * 1024 * 1024))
+ elif e.errno == errno.ENODEV:
+ _logger.error("Unable to use disk cache: The underlying filesystem does not support memory mapping.")
+ except Exception as e:
+ pass
+ finally:
+ # Check if we should evict things from the cache. Either
+ # because we added a new thing or there was an error and
+ # we possibly adjusted the limits down, so we might need
+ # to push something out.
+ self.cap_cache()
+
+ try:
+ # Only gets here if there was an error the first time. The
+ # exception handler adjusts limits downward in some cases
+ # to free up resources, which would make the operation
+ # succeed.
+ if slot.set(blob):
+ self.cache_total += slot.size()
+ except Exception as e:
+ # It failed again. Give up.
+ slot.set(None)
+ raise arvados.errors.KeepCacheError("Unable to save block %s to disk cache: %s" % (slot.locator, e))
+
+ self.cap_cache()
+
class Counter(object):
def __init__(self, v=0):
self._lk = threading.Lock()
@@ -261,18 +396,10 @@ class Counter(object):
class KeepClient(object):
+ DEFAULT_TIMEOUT = PyCurlHelper.DEFAULT_TIMEOUT
+ DEFAULT_PROXY_TIMEOUT = PyCurlHelper.DEFAULT_PROXY_TIMEOUT
- # Default Keep server connection timeout: 2 seconds
- # Default Keep server read timeout: 256 seconds
- # Default Keep server bandwidth minimum: 32768 bytes per second
- # Default Keep proxy connection timeout: 20 seconds
- # Default Keep proxy read timeout: 256 seconds
- # Default Keep proxy bandwidth minimum: 32768 bytes per second
- DEFAULT_TIMEOUT = (2, 256, 32768)
- DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)
-
-
- class KeepService(object):
+ class KeepService(PyCurlHelper):
"""Make requests to a single Keep service, and track results.
A KeepService is intended to last long enough to perform one
@@ -295,6 +422,7 @@ class KeepClient(object):
download_counter=None,
headers={},
insecure=False):
+ super(KeepClient.KeepService, self).__init__()
self.root = root
self._user_agent_pool = user_agent_pool
self._result = {'error': None}
@@ -332,30 +460,6 @@ class KeepClient(object):
except:
ua.close()
- def _socket_open(self, *args, **kwargs):
- if len(args) + len(kwargs) == 2:
- return self._socket_open_pycurl_7_21_5(*args, **kwargs)
- else:
- return self._socket_open_pycurl_7_19_3(*args, **kwargs)
-
- def _socket_open_pycurl_7_19_3(self, family, socktype, protocol, address=None):
- return self._socket_open_pycurl_7_21_5(
- purpose=None,
- address=collections.namedtuple(
- 'Address', ['family', 'socktype', 'protocol', 'addr'],
- )(family, socktype, protocol, address))
-
- def _socket_open_pycurl_7_21_5(self, purpose, address):
- """Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
- s = socket.socket(address.family, address.socktype, address.protocol)
- s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
- # Will throw invalid protocol error on mac. This test prevents that.
- if hasattr(socket, 'TCP_KEEPIDLE'):
- s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
- s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
- self._socket = s
- return s
-
def get(self, locator, method="GET", timeout=None):
# locator is a KeepLocator object.
url = self.root + str(locator)
@@ -381,6 +485,8 @@ class KeepClient(object):
curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
if method == "HEAD":
curl.setopt(pycurl.NOBODY, True)
+ else:
+ curl.setopt(pycurl.HTTPGET, True)
self._setcurltimeouts(curl, timeout, method=="HEAD")
try:
@@ -525,43 +631,6 @@ class KeepClient(object):
self.upload_counter.add(len(body))
return True
- def _setcurltimeouts(self, curl, timeouts, ignore_bandwidth=False):
- if not timeouts:
- return
- elif isinstance(timeouts, tuple):
- if len(timeouts) == 2:
- conn_t, xfer_t = timeouts
- bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
- else:
- conn_t, xfer_t, bandwidth_bps = timeouts
- else:
- conn_t, xfer_t = (timeouts, timeouts)
- bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
- curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))
- if not ignore_bandwidth:
- curl.setopt(pycurl.LOW_SPEED_TIME, int(math.ceil(xfer_t)))
- curl.setopt(pycurl.LOW_SPEED_LIMIT, int(math.ceil(bandwidth_bps)))
-
- def _headerfunction(self, header_line):
- if isinstance(header_line, bytes):
- header_line = header_line.decode('iso-8859-1')
- if ':' in header_line:
- name, value = header_line.split(':', 1)
- name = name.strip().lower()
- value = value.strip()
- elif self._headers:
- name = self._lastheadername
- value = self._headers[name] + ' ' + header_line.strip()
- elif header_line.startswith('HTTP/'):
- name = 'x-status-line'
- value = header_line
- else:
- _logger.error("Unexpected header line: %s", header_line)
- return
- self._lastheadername = name
- self._headers[name] = value
- # Returning None implies all bytes were written
-
class KeepWriterQueue(queue.Queue):
def __init__(self, copies, classes=[]):
@@ -756,7 +825,7 @@ class KeepClient(object):
def __init__(self, api_client=None, proxy=None,
timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,
api_token=None, local_store=None, block_cache=None,
- num_retries=0, session=None):
+ num_retries=10, session=None, num_prefetch_threads=None):
"""Initialize a new KeepClient.
Arguments:
@@ -809,7 +878,7 @@ class KeepClient(object):
:num_retries:
The default number of times to retry failed requests.
This will be used as the default num_retries value when get() and
- put() are called. Default 0.
+ put() are called. Default 10.
"""
self.lock = threading.Lock()
if proxy is None:
@@ -845,6 +914,12 @@ class KeepClient(object):
self.misses_counter = Counter()
self._storage_classes_unsupported_warning = False
self._default_classes = []
+ if num_prefetch_threads is not None:
+ self.num_prefetch_threads = num_prefetch_threads
+ else:
+ self.num_prefetch_threads = 2
+ self._prefetch_queue = None
+ self._prefetch_threads = None
if local_store:
self.local_store = local_store
@@ -1095,21 +1170,39 @@ class KeepClient(object):
try:
locator = KeepLocator(loc_s)
if method == "GET":
- slot, first = self.block_cache.reserve_cache(locator.md5sum)
- if not first:
+ while slot is None:
+ slot, first = self.block_cache.reserve_cache(locator.md5sum)
+ if first:
+ # Fresh and empty "first time it is used" slot
+ break
if prefetch:
- # this is request for a prefetch, if it is
- # already in flight, return immediately.
- # clear 'slot' to prevent finally block from
- # calling slot.set()
+ # this is request for a prefetch to fill in
+ # the cache, don't need to wait for the
+ # result, so if it is already in flight return
+ # immediately. Clear 'slot' to prevent
+ # finally block from calling slot.set()
+ if slot.ready.is_set():
+ slot.get()
slot = None
return None
- self.hits_counter.add(1)
+
blob = slot.get()
- if blob is None:
- raise arvados.errors.KeepReadError(
- "failed to read {}".format(loc_s))
- return blob
+ if blob is not None:
+ self.hits_counter.add(1)
+ return blob
+
+ # If blob is None, this means either
+ #
+ # (a) another thread was fetching this block and
+ # failed with an error or
+ #
+ # (b) cache thrashing caused the slot to be
+ # evicted (content set to None) by another thread
+ # between the call to reserve_cache() and get().
+ #
+ # We'll handle these cases by reserving a new slot
+ # and then doing a full GET request.
+ slot = None
self.misses_counter.add(1)
@@ -1171,8 +1264,7 @@ class KeepClient(object):
return blob
finally:
if slot is not None:
- slot.set(blob)
- self.block_cache.cap_cache()
+ self.block_cache.set(slot, blob)
# Q: Including 403 is necessary for the Keep tests to continue
# passing, but maybe they should expect KeepReadError instead?
@@ -1297,6 +1389,54 @@ class KeepClient(object):
"[{}] failed to write {} after {} (wanted {} copies but wrote {})".format(
request_id, data_hash, loop.attempts_str(), (copies, classes), writer_pool.done()), service_errors, label="service")
+ def _block_prefetch_worker(self):
+ """The background downloader thread."""
+ while True:
+ try:
+ b = self._prefetch_queue.get()
+ if b is None:
+ return
+ self.get(b, prefetch=True)
+ except Exception:
+ _logger.exception("Exception doing block prefetch")
+
+ def _start_prefetch_threads(self):
+ if self._prefetch_threads is None:
+ with self.lock:
+ if self._prefetch_threads is not None:
+ return
+ self._prefetch_queue = queue.Queue()
+ self._prefetch_threads = []
+ for i in range(0, self.num_prefetch_threads):
+ thread = threading.Thread(target=self._block_prefetch_worker)
+ self._prefetch_threads.append(thread)
+ thread.daemon = True
+ thread.start()
+
+ def block_prefetch(self, locator):
+ """
+ This relies on the fact that KeepClient implements a block cache,
+ so repeated requests for the same block will not result in repeated
+ downloads (unless the block is evicted from the cache.) This method
+ does not block.
+ """
+
+ if self.block_cache.get(locator) is not None:
+ return
+
+ self._start_prefetch_threads()
+ self._prefetch_queue.put(locator)
+
+ def stop_prefetch_threads(self):
+ with self.lock:
+ if self._prefetch_threads is not None:
+ for t in self._prefetch_threads:
+ self._prefetch_queue.put(None)
+ for t in self._prefetch_threads:
+ t.join()
+ self._prefetch_threads = None
+ self._prefetch_queue = None
+
def local_store_put(self, data, copies=1, num_retries=None, classes=[]):
"""A stub for put().
diff --git a/sdk/python/arvados/logging.py b/sdk/python/arvados/logging.py
new file mode 100644
index 0000000000..c6371f41b9
--- /dev/null
+++ b/sdk/python/arvados/logging.py
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Logging utilities for Arvados clients"""
+
+import logging
+
+log_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'
+log_date_format = '%Y-%m-%d %H:%M:%S'
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter(log_format, log_date_format))
+
+class GoogleHTTPClientFilter:
+ """Common googleapiclient.http log filters for Arvados clients
+
+ This filter makes `googleapiclient.http` log messages more useful for
+ typical Arvados applications. Currently it only changes the level of
+ retry messages (to INFO by default), but its functionality may be
+ extended in the future. Typical usage looks like:
+
+ logging.getLogger('googleapiclient.http').addFilter(GoogleHTTPClientFilter())
+ """
+ def __init__(self, *, retry_level='INFO'):
+ self.retry_levelname = retry_level
+ self.retry_levelno = getattr(logging, retry_level)
+
+ def filter(self, record):
+ if record.msg.startswith(('Sleeping ', 'Retry ')):
+ record.levelname = self.retry_levelname
+ record.levelno = self.retry_levelno
+ return True
diff --git a/sdk/python/arvados/retry.py b/sdk/python/arvados/retry.py
index ea4095930f..e9e574f5df 100644
--- a/sdk/python/arvados/retry.py
+++ b/sdk/python/arvados/retry.py
@@ -1,25 +1,45 @@
+"""Utilities to retry operations.
+
+The core of this module is `RetryLoop`, a utility class to retry operations
+that might fail. It can distinguish between temporary and permanent failures;
+provide exponential backoff; and save a series of results.
+
+It also provides utility functions for common operations with `RetryLoop`:
+
+* `check_http_response_success` can be used as a `RetryLoop` `success_check`
+ for HTTP response codes from the Arvados API server.
+* `retry_method` can decorate methods to provide a default `num_retries`
+ keyword argument.
+"""
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-from builtins import range
-from builtins import object
import functools
import inspect
import pycurl
import time
from collections import deque
+from typing import (
+ Callable,
+ Generic,
+ Optional,
+ TypeVar,
+)
import arvados.errors
_HTTP_SUCCESSES = set(range(200, 300))
-_HTTP_CAN_RETRY = set([408, 409, 422, 423, 500, 502, 503, 504])
+_HTTP_CAN_RETRY = set([408, 409, 423, 500, 502, 503, 504])
-class RetryLoop(object):
+CT = TypeVar('CT', bound=Callable)
+T = TypeVar('T')
+
+class RetryLoop(Generic[T]):
"""Coordinate limited retries of code.
- RetryLoop coordinates a loop that runs until it records a
+ `RetryLoop` coordinates a loop that runs until it records a
successful result or tries too many times, whichever comes first.
Typical use looks like:
@@ -33,30 +53,42 @@ class RetryLoop(object):
loop.save_result(result)
if loop.success():
return loop.last_result()
- """
- def __init__(self, num_retries, success_check=lambda r: True,
- backoff_start=0, backoff_growth=2, save_results=1,
- max_wait=60):
- """Construct a new RetryLoop.
- Arguments:
- * num_retries: The maximum number of times to retry the loop if it
- doesn't succeed. This means the loop could run at most 1+N times.
- * success_check: This is a function that will be called each
- time the loop saves a result. The function should return
- True if the result indicates loop success, False if it
- represents a permanent failure state, and None if the loop
- should continue. If no function is provided, the loop will
- end as soon as it records any result.
- * backoff_start: The number of seconds that must pass before the
- loop's second iteration. Default 0, which disables all waiting.
- * backoff_growth: The wait time multiplier after each iteration.
- Default 2 (i.e., double the wait time each time).
- * save_results: Specify a number to save the last N results
- that the loop recorded. These records are available through
- the results attribute, oldest first. Default 1.
- * max_wait: Maximum number of seconds to wait between retries.
- """
+ Arguments:
+
+ * num_retries: int --- The maximum number of times to retry the loop if
+ it doesn't succeed. This means the loop body could run at most
+ `num_retries + 1` times.
+
+ * success_check: Callable[[T], bool | None] --- This is a function that
+ will be called each time the loop saves a result. The function should
+ return `True` if the result indicates the code succeeded, `False` if
+ it represents a permanent failure, and `None` if it represents a
+ temporary failure. If no function is provided, the loop will end
+ after any result is saved.
+
+ * backoff_start: float --- The number of seconds that must pass before
+ the loop's second iteration. Default 0, which disables all waiting.
+
+ * backoff_growth: float --- The wait time multiplier after each
+ iteration. Default 2 (i.e., double the wait time each time).
+
+ * save_results: int --- Specify a number to store that many saved
+ results from the loop. These are available through the `results`
+ attribute, oldest first. Default 1.
+
+ * max_wait: float --- Maximum number of seconds to wait between
+ retries. Default 60.
+ """
+ def __init__(
+ self,
+ num_retries: int,
+ success_check: Callable[[T], Optional[bool]]=lambda r: True,
+ backoff_start: float=0,
+ backoff_growth: float=2,
+ save_results: int=1,
+ max_wait: float=60
+ ) -> None:
self.tries_left = num_retries + 1
self.check_result = success_check
self.backoff_wait = backoff_start
@@ -68,13 +100,25 @@ class RetryLoop(object):
self._running = None
self._success = None
- def __iter__(self):
+ def __iter__(self) -> 'RetryLoop':
+ """Return an iterator of retries."""
return self
- def running(self):
+ def running(self) -> Optional[bool]:
+ """Return whether this loop is running.
+
+ Returns `None` if the loop has never run, `True` if it is still running,
+    or `False` if it has stopped—whether that's because it has saved a
+ successful result, a permanent failure, or has run out of retries.
+ """
return self._running and (self._success is None)
- def __next__(self):
+ def __next__(self) -> int:
+ """Record a loop attempt.
+
+ If the loop is still running, decrements the number of tries left and
+ returns it. Otherwise, raises `StopIteration`.
+ """
if self._running is None:
self._running = True
if (self.tries_left < 1) or not self.running():
@@ -90,12 +134,19 @@ class RetryLoop(object):
self.tries_left -= 1
return self.tries_left
- def save_result(self, result):
+ def save_result(self, result: T) -> None:
"""Record a loop result.
Save the given result, and end the loop if it indicates
- success or permanent failure. See __init__'s documentation
- about success_check to learn how to make that indication.
+ success or permanent failure. See documentation for the `__init__`
+ `success_check` argument to learn how that's indicated.
+
+ Raises `arvados.errors.AssertionError` if called after the loop has
+ already ended.
+
+ Arguments:
+
+ * result: T --- The result from this loop attempt to check and save.
"""
if not self.running():
raise arvados.errors.AssertionError(
@@ -104,54 +155,66 @@ class RetryLoop(object):
self._success = self.check_result(result)
self._attempts += 1
- def success(self):
+ def success(self) -> Optional[bool]:
"""Return the loop's end state.
- Returns True if the loop obtained a successful result, False if it
- encountered permanent failure, or else None.
+ Returns `True` if the loop recorded a successful result, `False` if it
+ recorded permanent failure, or else `None`.
"""
return self._success
- def last_result(self):
- """Return the most recent result the loop recorded."""
+ def last_result(self) -> T:
+ """Return the most recent result the loop saved.
+
+ Raises `arvados.errors.AssertionError` if called before any result has
+ been saved.
+ """
try:
return self.results[-1]
except IndexError:
raise arvados.errors.AssertionError(
"queried loop results before any were recorded")
- def attempts(self):
- """Return the number of attempts that have been made.
+ def attempts(self) -> int:
+ """Return the number of results that have been saved.
- Includes successes and failures."""
+ This count includes all kinds of results: success, permanent failure,
+ and temporary failure.
+ """
return self._attempts
- def attempts_str(self):
- """Human-readable attempts(): 'N attempts' or '1 attempt'"""
+ def attempts_str(self) -> str:
+ """Return a human-friendly string counting saved results.
+
+ This method returns '1 attempt' or 'N attempts', where the number
+ in the string is the number of saved results.
+ """
if self._attempts == 1:
return '1 attempt'
else:
return '{} attempts'.format(self._attempts)
-def check_http_response_success(status_code):
- """Convert an HTTP status code to a loop control flag.
+def check_http_response_success(status_code: int) -> Optional[bool]:
+ """Convert a numeric HTTP status code to a loop control flag.
+
+ This method takes a numeric HTTP status code and returns `True` if
+ the code indicates success, `None` if it indicates temporary
+ failure, and `False` otherwise. You can use this as the
+ `success_check` for a `RetryLoop` that queries the Arvados API server.
+ Specifically:
+
+ * Any 2xx result returns `True`.
- Pass this method a numeric HTTP status code. It returns True if
- the code indicates success, None if it indicates temporary
- failure, and False otherwise. You can use this as the
- success_check for a RetryLoop.
+ * A select few status codes, or any malformed responses, return `None`.
- Implementation details:
- * Any 2xx result returns True.
- * A select few status codes, or any malformed responses, return None.
- 422 Unprocessable Entity is in this category. This may not meet the
- letter of the HTTP specification, but the Arvados API server will
- use it for various server-side problems like database connection
- errors.
- * Everything else returns False. Note that this includes 1xx and
+ * Everything else returns `False`. Note that this includes 1xx and
3xx status codes. They don't indicate success, and you can't
retry those requests verbatim.
+
+ Arguments:
+
+ * status_code: int --- A numeric HTTP response code
"""
if status_code in _HTTP_SUCCESSES:
return True
@@ -162,13 +225,18 @@ def check_http_response_success(status_code):
else:
return None # Get well soon, server.
-def retry_method(orig_func):
+def retry_method(orig_func: CT) -> CT:
"""Provide a default value for a method's num_retries argument.
This is a decorator for instance and class methods that accept a
- num_retries argument, with a None default. When the method is called
- without a value for num_retries, it will be set from the underlying
- instance or class' num_retries attribute.
+ `num_retries` keyword argument, with a `None` default. When the method
+ is called without a value for `num_retries`, this decorator will set it
+ from the `num_retries` attribute of the underlying instance or class.
+
+ Arguments:
+
+ * orig_func: Callable --- A class or instance method that accepts a
+ `num_retries` keyword argument
"""
@functools.wraps(orig_func)
def num_retries_setter(self, *args, **kwargs):
diff --git a/sdk/python/arvados/safeapi.py b/sdk/python/arvados/safeapi.py
index c6e17cae0b..56b92e8f08 100644
--- a/sdk/python/arvados/safeapi.py
+++ b/sdk/python/arvados/safeapi.py
@@ -1,47 +1,81 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Thread-safe wrapper for an Arvados API client
-from __future__ import absolute_import
+This module provides `ThreadSafeApiCache`, a thread-safe, API-compatible
+Arvados API client.
+"""
-from builtins import object
-import copy
+import sys
import threading
-import arvados
-import arvados.keep as keep
-import arvados.config as config
+from typing import (
+ Any,
+ Mapping,
+ Optional,
+)
+
+from . import config
+from . import keep
+from . import util
+
+api = sys.modules['arvados.api']
class ThreadSafeApiCache(object):
- """Threadsafe wrapper for API objects.
+ """Thread-safe wrapper for an Arvados API client
- This stores and returns a different api object per thread, because httplib2
- which underlies apiclient is not threadsafe.
+ This class takes all the arguments necessary to build a lower-level
+ Arvados API client `googleapiclient.discovery.Resource`, then
+ transparently builds and wraps a unique object per thread. This works
+ around the fact that the client's underlying HTTP client object is not
+ thread-safe.
- """
+ Arguments:
- def __init__(self, apiconfig=None, keep_params={}, api_params={}):
- if apiconfig is None:
- apiconfig = config.settings()
- self.apiconfig = copy.copy(apiconfig)
- self.api_params = api_params
- self.local = threading.local()
+ * apiconfig: Mapping[str, str] | None --- A mapping with entries for
+ `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally
+ `ARVADOS_API_HOST_INSECURE`. If not provided, uses
+ `arvados.config.settings` to get these parameters from user
+ configuration. You can pass an empty mapping to build the client
+ solely from `api_params`.
- # Initialize an API object for this thread before creating
- # KeepClient, this will report if ARVADOS_API_HOST or
- # ARVADOS_API_TOKEN are missing.
- self.localapi()
+ * keep_params: Mapping[str, Any] --- Keyword arguments used to construct
+ an associated `arvados.keep.KeepClient`.
+ * api_params: Mapping[str, Any] --- Keyword arguments used to construct
+ each thread's API client. These have the same meaning as in the
+ `arvados.api.api` function.
+
+ * version: str | None --- A string naming the version of the Arvados API
+ to use. If not specified, the code will log a warning and fall back to
+ `'v1'`.
+ """
+ def __init__(
+ self,
+ apiconfig: Optional[Mapping[str, str]]=None,
+ keep_params: Optional[Mapping[str, Any]]={},
+ api_params: Optional[Mapping[str, Any]]={},
+ version: Optional[str]=None,
+ ) -> None:
+ if apiconfig or apiconfig is None:
+ self._api_kwargs = api.api_kwargs_from_config(version, apiconfig, **api_params)
+ else:
+ self._api_kwargs = api.normalize_api_kwargs(version, **api_params)
+ self.api_token = self._api_kwargs['token']
+ self.request_id = self._api_kwargs.get('request_id')
+ self.local = threading.local()
self.keep = keep.KeepClient(api_client=self, **keep_params)
- def localapi(self):
- if 'api' not in self.local.__dict__:
- self.local.api = arvados.api_from_config('v1', apiconfig=self.apiconfig,
- **self.api_params)
- return self.local.api
+ def localapi(self) -> 'googleapiclient.discovery.Resource':
+ try:
+ client = self.local.api
+ except AttributeError:
+ client = api.api_client(**self._api_kwargs)
+ client._http._request_id = lambda: self.request_id or util.new_request_id()
+ self.local.api = client
+ return client
- def __getattr__(self, name):
+ def __getattr__(self, name: str) -> Any:
# Proxy nonexistent attributes to the thread-local API client.
- if name == "api_token":
- return self.apiconfig['ARVADOS_API_TOKEN']
return getattr(self.localapi(), name)
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
index edfb7711b8..37cd5d7db8 100644
--- a/sdk/python/arvados/stream.py
+++ b/sdk/python/arvados/stream.py
@@ -20,11 +20,13 @@ from arvados.retry import retry_method
from arvados.keep import *
from . import config
from . import errors
+from . import util
from ._normalize_stream import normalize_stream
class StreamReader(object):
+    @util._deprecated('3.0', 'arvados.collection.Collection')
def __init__(self, tokens, keep=None, debug=False, _empty=False,
- num_retries=0):
+ num_retries=10):
self._stream_name = None
self._data_locators = []
self._files = collections.OrderedDict()
diff --git a/sdk/python/arvados/util.py b/sdk/python/arvados/util.py
index c383d529e8..050c67f68d 100644
--- a/sdk/python/arvados/util.py
+++ b/sdk/python/arvados/util.py
@@ -1,47 +1,410 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+"""Arvados utilities
-from __future__ import division
-from builtins import range
+This module provides functions and constants that are useful across a variety
+of Arvados resource types, or extend the Arvados API client (see `arvados.api`).
+"""
+import errno
import fcntl
+import functools
import hashlib
import httplib2
import os
import random
import re
import subprocess
-import errno
import sys
+import warnings
+
+import arvados.errors
-import arvados
-from arvados.collection import CollectionReader
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterator,
+ TypeVar,
+ Union,
+)
+
+T = TypeVar('T')
HEX_RE = re.compile(r'^[0-9a-fA-F]+$')
+"""Regular expression to match a hexadecimal string (case-insensitive)"""
CR_UNCOMMITTED = 'Uncommitted'
+"""Constant `state` value for uncommited container requests"""
CR_COMMITTED = 'Committed'
+"""Constant `state` value for committed container requests"""
CR_FINAL = 'Final'
+"""Constant `state` value for finalized container requests"""
+
+keep_locator_pattern = re.compile(r'[0-9a-f]{32}\+[0-9]+(\+\S+)*')
+"""Regular expression to match any Keep block locator"""
+signed_locator_pattern = re.compile(r'[0-9a-f]{32}\+[0-9]+(\+\S+)*\+A\S+(\+\S+)*')
+"""Regular expression to match any Keep block locator with an access token hint"""
+portable_data_hash_pattern = re.compile(r'[0-9a-f]{32}\+[0-9]+')
+"""Regular expression to match any collection portable data hash"""
+manifest_pattern = re.compile(r'((\S+)( +[a-f0-9]{32}(\+[0-9]+)(\+\S+)*)+( +[0-9]+:[0-9]+:\S+)+$)+', flags=re.MULTILINE)
+"""Regular expression to match an Arvados collection manifest text"""
+keep_file_locator_pattern = re.compile(r'([0-9a-f]{32}\+[0-9]+)/(.*)')
+"""Regular expression to match a file path from a collection identified by portable data hash"""
+keepuri_pattern = re.compile(r'keep:([0-9a-f]{32}\+[0-9]+)/(.*)')
+"""Regular expression to match a `keep:` URI with a collection identified by portable data hash"""
-keep_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*')
-signed_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*\+A\S+(\+\S+)*')
-portable_data_hash_pattern = re.compile(r'[0-9a-f]{32}\+\d+')
uuid_pattern = re.compile(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}')
+"""Regular expression to match any Arvados object UUID"""
collection_uuid_pattern = re.compile(r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')
+"""Regular expression to match any Arvados collection UUID"""
+container_uuid_pattern = re.compile(r'[a-z0-9]{5}-dz642-[a-z0-9]{15}')
+"""Regular expression to match any Arvados container UUID"""
group_uuid_pattern = re.compile(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}')
-user_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')
+"""Regular expression to match any Arvados group UUID"""
link_uuid_pattern = re.compile(r'[a-z0-9]{5}-o0j2j-[a-z0-9]{15}')
+"""Regular expression to match any Arvados link UUID"""
+user_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')
+"""Regular expression to match any Arvados user UUID"""
job_uuid_pattern = re.compile(r'[a-z0-9]{5}-8i9sb-[a-z0-9]{15}')
-container_uuid_pattern = re.compile(r'[a-z0-9]{5}-dz642-[a-z0-9]{15}')
-manifest_pattern = re.compile(r'((\S+)( +[a-f0-9]{32}(\+\d+)(\+\S+)*)+( +\d+:\d+:\S+)+$)+', flags=re.MULTILINE)
+"""Regular expression to match any Arvados job UUID
+
+.. WARNING:: Deprecated
+ Arvados job resources are deprecated and will be removed in a future
+ release. Prefer the containers API instead.
+"""
+
+def _deprecated(version=None, preferred=None):
+ """Mark a callable as deprecated in the SDK
+
+ This will wrap the callable to emit as a DeprecationWarning
+ and add a deprecation notice to its docstring.
+
+ If the following arguments are given, they'll be included in the
+ notices:
+
+ * preferred: str | None --- The name of an alternative that users should
+ use instead.
+
+ * version: str | None --- The version of Arvados when the callable is
+ scheduled to be removed.
+ """
+ if version is None:
+ version = ''
+ else:
+ version = f' and scheduled to be removed in Arvados {version}'
+ if preferred is None:
+ preferred = ''
+ else:
+ preferred = f' Prefer {preferred} instead.'
+ def deprecated_decorator(func):
+ fullname = f'{func.__module__}.{func.__qualname__}'
+ parent, _, name = fullname.rpartition('.')
+ if name == '__init__':
+ fullname = parent
+ warning_msg = f'{fullname} is deprecated{version}.{preferred}'
+ @functools.wraps(func)
+ def deprecated_wrapper(*args, **kwargs):
+ warnings.warn(warning_msg, DeprecationWarning, 2)
+ return func(*args, **kwargs)
+ # Get func's docstring without any trailing newline or empty lines.
+ func_doc = re.sub(r'\n\s*$', '', func.__doc__ or '')
+ match = re.search(r'\n([ \t]+)\S', func_doc)
+ indent = '' if match is None else match.group(1)
+ warning_doc = f'\n\n{indent}.. WARNING:: Deprecated\n{indent} {warning_msg}'
+ # Make the deprecation notice the second "paragraph" of the
+ # docstring if possible. Otherwise append it.
+ docstring, count = re.subn(
+ rf'\n[ \t]*\n{indent}',
+ f'{warning_doc}\n\n{indent}',
+ func_doc,
+ count=1,
+ )
+ if not count:
+ docstring = f'{func_doc.lstrip()}{warning_doc}'
+ deprecated_wrapper.__doc__ = docstring
+ return deprecated_wrapper
+ return deprecated_decorator
+
+def is_hex(s: str, *length_args: int) -> bool:
+ """Indicate whether a string is a hexadecimal number
+
+ This method returns true if all characters in the string are hexadecimal
+ digits. It is case-insensitive.
+
+ You can also pass optional length arguments to check that the string has
+ the expected number of digits. If you pass one integer, the string must
+ have that length exactly, otherwise the method returns False. If you
+ pass two integers, the string's length must fall within that minimum and
+ maximum (inclusive), otherwise the method returns False.
+
+ Arguments:
+
+ * s: str --- The string to check
+
+ * length_args: int --- Optional length limit(s) for the string to check
+ """
+ num_length_args = len(length_args)
+ if num_length_args > 2:
+ raise arvados.errors.ArgumentError(
+ "is_hex accepts up to 3 arguments ({} given)".format(1 + num_length_args))
+ elif num_length_args == 2:
+ good_len = (length_args[0] <= len(s) <= length_args[1])
+ elif num_length_args == 1:
+ good_len = (len(s) == length_args[0])
+ else:
+ good_len = True
+ return bool(good_len and HEX_RE.match(s))
+
+def keyset_list_all(
+ fn: Callable[..., 'arvados.api_resources.ArvadosAPIRequest'],
+ order_key: str="created_at",
+ num_retries: int=0,
+ ascending: bool=True,
+ **kwargs: Any,
+) -> Iterator[Dict[str, Any]]:
+ """Iterate all Arvados resources from an API list call
+
+ This method takes a method that represents an Arvados API list call, and
+ iterates the objects returned by the API server. It can make multiple API
+ calls to retrieve and iterate all objects available from the API server.
+
+ Arguments:
+
+ * fn: Callable[..., arvados.api_resources.ArvadosAPIRequest] --- A
+ function that wraps an Arvados API method that returns a list of
+ objects. If you have an Arvados API client named `arv`, examples
+ include `arv.collections().list` and `arv.groups().contents`. Note
+ that you should pass the function *without* calling it.
+
+ * order_key: str --- The name of the primary object field that objects
+ should be sorted by. This name is used to build an `order` argument
+ for `fn`. Default `'created_at'`.
+
+ * num_retries: int --- This argument is passed through to
+ `arvados.api_resources.ArvadosAPIRequest.execute` for each API call. See
+ that method's docstring for details. Default 0 (meaning API calls will
+ use the `num_retries` value set when the Arvados API client was
+ constructed).
+
+ * ascending: bool --- Used to build an `order` argument for `fn`. If True,
+ all fields will be sorted in `'asc'` (ascending) order. Otherwise, all
+ fields will be sorted in `'desc'` (descending) order.
+
+ Additional keyword arguments will be passed directly to `fn` for each API
+ call. Note that this function sets `count`, `limit`, and `order` as part of
+ its work.
+ """
+ pagesize = 1000
+ kwargs["limit"] = pagesize
+ kwargs["count"] = 'none'
+ asc = "asc" if ascending else "desc"
+ kwargs["order"] = ["%s %s" % (order_key, asc), "uuid %s" % asc]
+ other_filters = kwargs.get("filters", [])
+
+ try:
+ select = set(kwargs['select'])
+ except KeyError:
+ pass
+ else:
+ select.add(order_key)
+ select.add('uuid')
+ kwargs['select'] = list(select)
+
+ nextpage = []
+ tot = 0
+ expect_full_page = True
+ seen_prevpage = set()
+ seen_thispage = set()
+ lastitem = None
+ prev_page_all_same_order_key = False
+
+ while True:
+ kwargs["filters"] = nextpage+other_filters
+ items = fn(**kwargs).execute(num_retries=num_retries)
+
+ if len(items["items"]) == 0:
+ if prev_page_all_same_order_key:
+ nextpage = [[order_key, ">" if ascending else "<", lastitem[order_key]]]
+ prev_page_all_same_order_key = False
+ continue
+ else:
+ return
+
+ seen_prevpage = seen_thispage
+ seen_thispage = set()
+
+ for i in items["items"]:
+ # In cases where there's more than one record with the
+ # same order key, the result could include records we
+ # already saw in the last page. Skip them.
+ if i["uuid"] in seen_prevpage:
+ continue
+ seen_thispage.add(i["uuid"])
+ yield i
+
+ firstitem = items["items"][0]
+ lastitem = items["items"][-1]
+
+ if firstitem[order_key] == lastitem[order_key]:
+ # Got a page where every item has the same order key.
+ # Switch to using uuid for paging.
+ nextpage = [[order_key, "=", lastitem[order_key]], ["uuid", ">" if ascending else "<", lastitem["uuid"]]]
+ prev_page_all_same_order_key = True
+ else:
+ # Start from the last order key seen, but skip the last
+ # known uuid to avoid retrieving the same row twice. If
+ # there are multiple rows with the same order key it is
+ # still likely we'll end up retrieving duplicate rows.
+ # That's handled by tracking the "seen" rows for each page
+ # so they can be skipped if they show up on the next page.
+ nextpage = [[order_key, ">=" if ascending else "<=", lastitem[order_key]], ["uuid", "!=", lastitem["uuid"]]]
+ prev_page_all_same_order_key = False
+
+def ca_certs_path(fallback: T=httplib2.CA_CERTS) -> Union[str, T]:
+ """Return the path of the best available source of CA certificates
+
+ This function checks various known paths that provide trusted CA
+ certificates, and returns the first one that exists. It checks:
+
+ * the path in the `SSL_CERT_FILE` environment variable (used by OpenSSL)
+ * `/etc/arvados/ca-certificates.crt`, respected by all Arvados software
+ * `/etc/ssl/certs/ca-certificates.crt`, the default store on Debian-based
+ distributions
+ * `/etc/pki/tls/certs/ca-bundle.crt`, the default store on Red Hat-based
+ distributions
+
+ If none of these paths exist, this function returns the value of `fallback`.
+
+ Arguments:
+
+ * fallback: T --- The value to return if none of the known paths exist.
+ The default value is the certificate store of Mozilla's trusted CAs
+ included with the Python [certifi][] package.
+
+ [certifi]: https://pypi.org/project/certifi/
+ """
+ for ca_certs_path in [
+ # SSL_CERT_FILE and SSL_CERT_DIR are openssl overrides - note
+ # that httplib2 itself also supports HTTPLIB2_CA_CERTS.
+ os.environ.get('SSL_CERT_FILE'),
+ # Arvados specific:
+ '/etc/arvados/ca-certificates.crt',
+ # Debian:
+ '/etc/ssl/certs/ca-certificates.crt',
+ # Red Hat:
+ '/etc/pki/tls/certs/ca-bundle.crt',
+ ]:
+ if ca_certs_path and os.path.exists(ca_certs_path):
+ return ca_certs_path
+ return fallback
+
+def new_request_id() -> str:
+ """Return a random request ID
+
+ This function generates and returns a random string suitable for use as a
+ `X-Request-Id` header value in the Arvados API.
+ """
+ rid = "req-"
+ # 2**104 > 36**20 > 2**103
+ n = random.getrandbits(104)
+ for _ in range(20):
+ c = n % 36
+ if c < 10:
+ rid += chr(c+ord('0'))
+ else:
+ rid += chr(c+ord('a')-10)
+ n = n // 36
+ return rid
+
+def get_config_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
+ """Return an Arvados cluster's configuration, with caching
+
+ This function gets and returns the Arvados configuration from the API
+ server. It caches the result on the client object and reuses it on any
+ future calls.
+
+ Arguments:
+
+ * svc: arvados.api_resources.ArvadosAPIClient --- The Arvados API client
+ object to use to retrieve and cache the Arvados cluster configuration.
+ """
+ if not svc._rootDesc.get('resources').get('configs', False):
+ # Old API server version, no config export endpoint
+ return {}
+ if not hasattr(svc, '_cached_config'):
+ svc._cached_config = svc.configs().get().execute()
+ return svc._cached_config
+
+def get_vocabulary_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:
+ """Return an Arvados cluster's vocabulary, with caching
+
+ This function gets and returns the Arvados vocabulary from the API
+ server. It caches the result on the client object and reuses it on any
+ future calls.
+
+ .. HINT:: Low-level method
+ This is a relatively low-level wrapper around the Arvados API. Most
+ users will prefer to use `arvados.vocabulary.load_vocabulary`.
+
+ Arguments:
+
+ * svc: arvados.api_resources.ArvadosAPIClient --- The Arvados API client
+ object to use to retrieve and cache the Arvados cluster vocabulary.
+ """
+ if not svc._rootDesc.get('resources').get('vocabularies', False):
+ # Old API server version, no vocabulary export endpoint
+ return {}
+ if not hasattr(svc, '_cached_vocabulary'):
+ svc._cached_vocabulary = svc.vocabularies().get().execute()
+ return svc._cached_vocabulary
+def trim_name(collectionname: str) -> str:
+ """Limit the length of a name to fit within Arvados API limits
+
+ This function ensures that a string is short enough to use as an object
+ name in the Arvados API, leaving room for text that may be added by the
+ `ensure_unique_name` argument. If the source name is short enough, it is
+ returned unchanged. Otherwise, this function returns a string with excess
+ characters removed from the middle of the source string and replaced with
+ an ellipsis.
+
+ Arguments:
+
+ * collectionname: str --- The desired source name
+ """
+ max_name_len = 254 - 28
+
+ if len(collectionname) > max_name_len:
+ over = len(collectionname) - max_name_len
+ split = int(max_name_len/2)
+        collectionname = collectionname[0:split] + "…" + collectionname[split+over:]
+
+ return collectionname
+
+@_deprecated('3.0', 'arvados.util.keyset_list_all')
+def list_all(fn, num_retries=0, **kwargs):
+ # Default limit to (effectively) api server's MAX_LIMIT
+ kwargs.setdefault('limit', sys.maxsize)
+ items = []
+ offset = 0
+ items_available = sys.maxsize
+ while len(items) < items_available:
+ c = fn(offset=offset, **kwargs).execute(num_retries=num_retries)
+ items += c['items']
+ items_available = c['items_available']
+ offset = c['offset'] + len(c['items'])
+ return items
+
+@_deprecated('3.0')
def clear_tmpdir(path=None):
"""
Ensure the given directory (or TASK_TMPDIR if none given)
exists and is empty.
"""
+ from arvados import current_task
if path is None:
- path = arvados.current_task().tmpdir
+ path = current_task().tmpdir
if os.path.exists(path):
p = subprocess.Popen(['rm', '-rf', path])
stdout, stderr = p.communicate(None)
@@ -49,6 +412,7 @@ def clear_tmpdir(path=None):
raise Exception('rm -rf %s: %s' % (path, stderr))
os.mkdir(path)
+@_deprecated('3.0', 'subprocess.run')
def run_command(execargs, **kwargs):
kwargs.setdefault('stdin', subprocess.PIPE)
kwargs.setdefault('stdout', subprocess.PIPE)
@@ -63,9 +427,11 @@ def run_command(execargs, **kwargs):
(execargs, p.returncode, stderrdata))
return stdoutdata, stderrdata
+@_deprecated('3.0')
def git_checkout(url, version, path):
+ from arvados import current_job
if not re.search('^/', path):
- path = os.path.join(arvados.current_job().tmpdir, path)
+ path = os.path.join(current_job().tmpdir, path)
if not os.path.exists(path):
run_command(["git", "clone", url, path],
cwd=os.path.dirname(path))
@@ -73,6 +439,7 @@ def git_checkout(url, version, path):
cwd=path)
return path
+@_deprecated('3.0')
def tar_extractor(path, decompress_flag):
return subprocess.Popen(["tar",
"-C", path,
@@ -82,6 +449,7 @@ def tar_extractor(path, decompress_flag):
stdin=subprocess.PIPE, stderr=sys.stderr,
shell=False, close_fds=True)
+@_deprecated('3.0', 'arvados.collection.Collection.open and the tarfile module')
def tarball_extract(tarball, path):
"""Retrieve a tarball from Keep and extract it to a local
directory. Return the absolute path where the tarball was
@@ -92,8 +460,10 @@ def tarball_extract(tarball, path):
tarball -- collection locator
path -- where to extract the tarball: absolute, or relative to job tmp
"""
+ from arvados import current_job
+ from arvados.collection import CollectionReader
if not re.search('^/', path):
- path = os.path.join(arvados.current_job().tmpdir, path)
+ path = os.path.join(current_job().tmpdir, path)
lockfile = open(path + '.lock', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX)
try:
@@ -116,11 +486,12 @@ def tarball_extract(tarball, path):
os.unlink(os.path.join(path, '.locator'))
for f in CollectionReader(tarball).all_files():
- if re.search('\.(tbz|tar.bz2)$', f.name()):
+ f_name = f.name()
+ if f_name.endswith(('.tbz', '.tar.bz2')):
p = tar_extractor(path, 'j')
- elif re.search('\.(tgz|tar.gz)$', f.name()):
+ elif f_name.endswith(('.tgz', '.tar.gz')):
p = tar_extractor(path, 'z')
- elif re.search('\.tar$', f.name()):
+ elif f_name.endswith('.tar'):
p = tar_extractor(path, '')
else:
raise arvados.errors.AssertionError(
@@ -143,6 +514,7 @@ def tarball_extract(tarball, path):
return os.path.join(path, tld_extracts[0])
return path
+@_deprecated('3.0', 'arvados.collection.Collection.open and the zipfile module')
def zipball_extract(zipball, path):
"""Retrieve a zip archive from Keep and extract it to a local
directory. Return the absolute path where the archive was
@@ -153,8 +525,10 @@ def zipball_extract(zipball, path):
zipball -- collection locator
path -- where to extract the archive: absolute, or relative to job tmp
"""
+ from arvados import current_job
+ from arvados.collection import CollectionReader
if not re.search('^/', path):
- path = os.path.join(arvados.current_job().tmpdir, path)
+ path = os.path.join(current_job().tmpdir, path)
lockfile = open(path + '.lock', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX)
try:
@@ -177,7 +551,7 @@ def zipball_extract(zipball, path):
os.unlink(os.path.join(path, '.locator'))
for f in CollectionReader(zipball).all_files():
- if not re.search('\.zip$', f.name()):
+ if not f.name().endswith('.zip'):
raise arvados.errors.NotImplementedError(
"zipball_extract cannot handle filename %s" % f.name())
zip_filename = os.path.join(path, os.path.basename(f.name()))
@@ -209,6 +583,7 @@ def zipball_extract(zipball, path):
return os.path.join(path, tld_extracts[0])
return path
+@_deprecated('3.0', 'arvados.collection.Collection')
def collection_extract(collection, path, files=[], decompress=True):
"""Retrieve a collection from Keep and extract it to a local
directory. Return the absolute path where the collection was
@@ -217,13 +592,15 @@ def collection_extract(collection, path, files=[], decompress=True):
collection -- collection locator
path -- where to extract: absolute, or relative to job tmp
"""
+ from arvados import current_job
+ from arvados.collection import CollectionReader
matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
if matches:
collection_hash = matches.group(1)
else:
collection_hash = hashlib.md5(collection).hexdigest()
if not re.search('^/', path):
- path = os.path.join(arvados.current_job().tmpdir, path)
+ path = os.path.join(current_job().tmpdir, path)
lockfile = open(path + '.lock', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX)
try:
@@ -272,6 +649,7 @@ def collection_extract(collection, path, files=[], decompress=True):
lockfile.close()
return path
+@_deprecated('3.0', 'pathlib.Path().mkdir(parents=True, exist_ok=True)')
def mkdir_dash_p(path):
if not os.path.isdir(path):
try:
@@ -284,6 +662,7 @@ def mkdir_dash_p(path):
else:
raise
+@_deprecated('3.0', 'arvados.collection.Collection')
def stream_extract(stream, path, files=[], decompress=True):
"""Retrieve a stream from Keep and extract it to a local
directory. Return the absolute path where the stream was
@@ -292,8 +671,9 @@ def stream_extract(stream, path, files=[], decompress=True):
stream -- StreamReader object
path -- where to extract: absolute, or relative to job tmp
"""
+ from arvados import current_job
if not re.search('^/', path):
- path = os.path.join(arvados.current_job().tmpdir, path)
+ path = os.path.join(current_job().tmpdir, path)
lockfile = open(path + '.lock', 'w')
fcntl.flock(lockfile, fcntl.LOCK_EX)
try:
@@ -324,6 +704,7 @@ def stream_extract(stream, path, files=[], decompress=True):
lockfile.close()
return path
+@_deprecated('3.0', 'os.walk')
def listdir_recursive(dirname, base=None, max_depth=None):
"""listdir_recursive(dirname, base, max_depth)
@@ -352,151 +733,3 @@ def listdir_recursive(dirname, base=None, max_depth=None):
else:
allfiles += [ent_base]
return allfiles
-
-def is_hex(s, *length_args):
- """is_hex(s[, length[, max_length]]) -> boolean
-
- Return True if s is a string of hexadecimal digits.
- If one length argument is given, the string must contain exactly
- that number of digits.
- If two length arguments are given, the string must contain a number of
- digits between those two lengths, inclusive.
- Return False otherwise.
- """
- num_length_args = len(length_args)
- if num_length_args > 2:
- raise arvados.errors.ArgumentError(
- "is_hex accepts up to 3 arguments ({} given)".format(1 + num_length_args))
- elif num_length_args == 2:
- good_len = (length_args[0] <= len(s) <= length_args[1])
- elif num_length_args == 1:
- good_len = (len(s) == length_args[0])
- else:
- good_len = True
- return bool(good_len and HEX_RE.match(s))
-
-def list_all(fn, num_retries=0, **kwargs):
- # Default limit to (effectively) api server's MAX_LIMIT
- kwargs.setdefault('limit', sys.maxsize)
- items = []
- offset = 0
- items_available = sys.maxsize
- while len(items) < items_available:
- c = fn(offset=offset, **kwargs).execute(num_retries=num_retries)
- items += c['items']
- items_available = c['items_available']
- offset = c['offset'] + len(c['items'])
- return items
-
-def keyset_list_all(fn, order_key="created_at", num_retries=0, ascending=True, **kwargs):
- pagesize = 1000
- kwargs["limit"] = pagesize
- kwargs["count"] = 'none'
- asc = "asc" if ascending else "desc"
- kwargs["order"] = ["%s %s" % (order_key, asc), "uuid %s" % asc]
- other_filters = kwargs.get("filters", [])
-
- if "select" in kwargs and "uuid" not in kwargs["select"]:
- kwargs["select"].append("uuid")
-
- nextpage = []
- tot = 0
- expect_full_page = True
- seen_prevpage = set()
- seen_thispage = set()
- lastitem = None
- prev_page_all_same_order_key = False
-
- while True:
- kwargs["filters"] = nextpage+other_filters
- items = fn(**kwargs).execute(num_retries=num_retries)
-
- if len(items["items"]) == 0:
- if prev_page_all_same_order_key:
- nextpage = [[order_key, ">" if ascending else "<", lastitem[order_key]]]
- prev_page_all_same_order_key = False
- continue
- else:
- return
-
- seen_prevpage = seen_thispage
- seen_thispage = set()
-
- for i in items["items"]:
- # In cases where there's more than one record with the
- # same order key, the result could include records we
- # already saw in the last page. Skip them.
- if i["uuid"] in seen_prevpage:
- continue
- seen_thispage.add(i["uuid"])
- yield i
-
- firstitem = items["items"][0]
- lastitem = items["items"][-1]
-
- if firstitem[order_key] == lastitem[order_key]:
- # Got a page where every item has the same order key.
- # Switch to using uuid for paging.
- nextpage = [[order_key, "=", lastitem[order_key]], ["uuid", ">" if ascending else "<", lastitem["uuid"]]]
- prev_page_all_same_order_key = True
- else:
- # Start from the last order key seen, but skip the last
- # known uuid to avoid retrieving the same row twice. If
- # there are multiple rows with the same order key it is
- # still likely we'll end up retrieving duplicate rows.
- # That's handled by tracking the "seen" rows for each page
- # so they can be skipped if they show up on the next page.
- nextpage = [[order_key, ">=" if ascending else "<=", lastitem[order_key]], ["uuid", "!=", lastitem["uuid"]]]
- prev_page_all_same_order_key = False
-
-
-def ca_certs_path(fallback=httplib2.CA_CERTS):
- """Return the path of the best available CA certs source.
-
- This function searches for various distribution sources of CA
- certificates, and returns the first it finds. If it doesn't find any,
- it returns the value of `fallback` (httplib2's CA certs by default).
- """
- for ca_certs_path in [
- # SSL_CERT_FILE and SSL_CERT_DIR are openssl overrides - note
- # that httplib2 itself also supports HTTPLIB2_CA_CERTS.
- os.environ.get('SSL_CERT_FILE'),
- # Arvados specific:
- '/etc/arvados/ca-certificates.crt',
- # Debian:
- '/etc/ssl/certs/ca-certificates.crt',
- # Red Hat:
- '/etc/pki/tls/certs/ca-bundle.crt',
- ]:
- if ca_certs_path and os.path.exists(ca_certs_path):
- return ca_certs_path
- return fallback
-
-def new_request_id():
- rid = "req-"
- # 2**104 > 36**20 > 2**103
- n = random.getrandbits(104)
- for _ in range(20):
- c = n % 36
- if c < 10:
- rid += chr(c+ord('0'))
- else:
- rid += chr(c+ord('a')-10)
- n = n // 36
- return rid
-
-def get_config_once(svc):
- if not svc._rootDesc.get('resources').get('configs', False):
- # Old API server version, no config export endpoint
- return {}
- if not hasattr(svc, '_cached_config'):
- svc._cached_config = svc.configs().get().execute()
- return svc._cached_config
-
-def get_vocabulary_once(svc):
- if not svc._rootDesc.get('resources').get('vocabularies', False):
- # Old API server version, no vocabulary export endpoint
- return {}
- if not hasattr(svc, '_cached_vocabulary'):
- svc._cached_vocabulary = svc.vocabularies().get().execute()
- return svc._cached_vocabulary
diff --git a/sdk/python/arvados_version.py b/sdk/python/arvados_version.py
index 092131d930..794b6afe42 100644
--- a/sdk/python/arvados_version.py
+++ b/sdk/python/arvados_version.py
@@ -1,61 +1,145 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+#
+# This file runs in one of three modes:
+#
+# 1. If the ARVADOS_BUILDING_VERSION environment variable is set, it writes
+# _version.py and generates dependencies based on that value.
+# 2. If running from an arvados Git checkout, it writes _version.py
+# and generates dependencies from Git.
+# 3. Otherwise, we expect this is source previously generated from Git, and
+# it reads _version.py and generates dependencies from it.
-import subprocess
-import time
import os
import re
+import runpy
+import subprocess
import sys
-SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
-VERSION_PATHS = {
- SETUP_DIR,
- os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
- }
+from pathlib import Path
+
+# These maps explain the relationships between different Python modules in
+# the arvados repository. We use these to help generate setup.py.
+PACKAGE_DEPENDENCY_MAP = {
+ 'arvados-cwl-runner': ['arvados-python-client', 'crunchstat_summary'],
+ 'arvados-user-activity': ['arvados-python-client'],
+ 'arvados_fuse': ['arvados-python-client'],
+ 'crunchstat_summary': ['arvados-python-client'],
+}
+PACKAGE_MODULE_MAP = {
+ 'arvados-cwl-runner': 'arvados_cwl',
+ 'arvados-docker-cleaner': 'arvados_docker',
+ 'arvados-python-client': 'arvados',
+ 'arvados-user-activity': 'arvados_user_activity',
+ 'arvados_fuse': 'arvados_fuse',
+ 'crunchstat_summary': 'crunchstat_summary',
+}
+PACKAGE_SRCPATH_MAP = {
+ 'arvados-cwl-runner': Path('sdk', 'cwl'),
+ 'arvados-docker-cleaner': Path('services', 'dockercleaner'),
+ 'arvados-python-client': Path('sdk', 'python'),
+ 'arvados-user-activity': Path('tools', 'user-activity'),
+ 'arvados_fuse': Path('services', 'fuse'),
+ 'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+}
+
+ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
+SETUP_DIR = Path(__file__).absolute().parent
+try:
+ REPO_PATH = Path(subprocess.check_output(
+ ['git', '-C', str(SETUP_DIR), 'rev-parse', '--show-toplevel'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).rstrip('\n'))
+except (subprocess.CalledProcessError, OSError):
+ REPO_PATH = None
+else:
+ # Verify this is the arvados monorepo
+ if all((REPO_PATH / path).exists() for path in PACKAGE_SRCPATH_MAP.values()):
+ PACKAGE_NAME, = (
+ pkg_name for pkg_name, path in PACKAGE_SRCPATH_MAP.items()
+ if (REPO_PATH / path) == SETUP_DIR
+ )
+ MODULE_NAME = PACKAGE_MODULE_MAP[PACKAGE_NAME]
+ VERSION_SCRIPT_PATH = Path(REPO_PATH, 'build', 'version-at-commit.sh')
+ else:
+ REPO_PATH = None
+if REPO_PATH is None:
+ (PACKAGE_NAME, MODULE_NAME), = (
+ (pkg_name, mod_name)
+ for pkg_name, mod_name in PACKAGE_MODULE_MAP.items()
+ if (SETUP_DIR / mod_name).is_dir()
+ )
+
+def short_tests_only(arglist=sys.argv):
+ try:
+ arglist.remove('--short-tests-only')
+ except ValueError:
+ return False
+ else:
+ return True
+
+def git_log_output(path, *args):
+ return subprocess.check_output(
+ ['git', '-C', str(REPO_PATH),
+ 'log', '--first-parent', '--max-count=1',
+ *args, str(path)],
+ text=True,
+ ).rstrip('\n')
def choose_version_from():
- ts = {}
- for path in VERSION_PATHS:
- ts[subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', path]).strip()] = path
-
- sorted_ts = sorted(ts.items())
- getver = sorted_ts[-1][1]
- print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ ver_paths = [SETUP_DIR, VERSION_SCRIPT_PATH, *(
+ PACKAGE_SRCPATH_MAP[pkg]
+ for pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ())
+ )]
+ getver = max(ver_paths, key=lambda path: git_log_output(path, '--format=format:%ct'))
+ print(f"Using {getver} for version number calculation of {SETUP_DIR}", file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
- myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
- '--format=%H', curdir]).strip()
- myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
- return myversion
+ myhash = git_log_output(curdir, '--format=%H')
+ return subprocess.check_output(
+ [str(VERSION_SCRIPT_PATH), myhash],
+ text=True,
+ ).rstrip('\n')
def save_version(setup_dir, module, v):
- v = v.replace("~dev", ".dev").replace("~rc", "rc")
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ with Path(setup_dir, module, '_version.py').open('w') as fp:
+ print(f"__version__ = {v!r}", file=fp)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
-
-def get_version(setup_dir, module):
- env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+ file_vars = runpy.run_path(Path(setup_dir, module, '_version.py'))
+ return file_vars['__version__']
- if env_version:
- save_version(setup_dir, module, env_version)
+def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
+ if ENV_VERSION:
+ version = ENV_VERSION
+ elif REPO_PATH is None:
+ return read_version(setup_dir, module)
else:
- try:
- save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError) as err:
- print("ERROR: {0}".format(err), file=sys.stderr)
- pass
+ version = git_version_at_commit()
+ version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ save_version(setup_dir, module, version)
+ return version
- return read_version(setup_dir, module)
+def iter_dependencies(version=None):
+ if version is None:
+ version = get_version()
+ # A packaged development release should be installed with other
+ # development packages built from the same source, but those
+ # dependencies may have earlier "dev" versions (read: less recent
+ # Git commit timestamps). This compatible version dependency
+ # expresses that as closely as possible. Allowing versions
+ # compatible with .dev0 allows any development release.
+ # Regular expression borrowed partially from
+ #
+ dep_ver, match_count = re.subn(r'\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)
+ dep_op = '~=' if match_count else '=='
+ for dep_pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ()):
+ yield f'{dep_pkg}{dep_op}{dep_ver}'
# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
if __name__ == '__main__':
- print(get_version(SETUP_DIR, "arvados"))
+ print(get_version())
diff --git a/sdk/python/discovery2pydoc.py b/sdk/python/discovery2pydoc.py
new file mode 100755
index 0000000000..70a51371ac
--- /dev/null
+++ b/sdk/python/discovery2pydoc.py
@@ -0,0 +1,431 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+"""discovery2pydoc - Build skeleton Python from the Arvados discovery document
+
+This tool reads the Arvados discovery document and writes a Python source file
+with classes and methods that correspond to the resources that
+google-api-python-client builds dynamically. This source does not include any
+implementation, but it does include real method signatures and documentation
+strings, so it's useful as documentation for tools that read Python source,
+including pydoc and pdoc.
+
+If you run this tool with the path to a discovery document, it uses no
+dependencies outside the Python standard library. If it needs to read
+configuration to find the discovery document dynamically, it'll load the
+`arvados` module to do that.
+"""
+
+import argparse
+import inspect
+import json
+import keyword
+import operator
+import os
+import pathlib
+import re
+import sys
+import urllib.parse
+import urllib.request
+
+from typing import (
+ Any,
+ Callable,
+ Mapping,
+ Optional,
+ Sequence,
+)
+
+LOWERCASE = operator.methodcaller('lower')
+NAME_KEY = operator.attrgetter('name')
+STDSTREAM_PATH = pathlib.Path('-')
+TITLECASE = operator.methodcaller('title')
+
+_ALIASED_METHODS = frozenset([
+ 'destroy',
+ 'index',
+ 'show',
+])
+_DEPRECATED_NOTICE = '''
+
+.. WARNING:: Deprecated
+ This resource is deprecated in the Arvados API.
+'''
+_DEPRECATED_RESOURCES = frozenset([
+ 'Humans',
+ 'JobTasks',
+ 'Jobs',
+ 'KeepDisks',
+ 'Nodes',
+ 'PipelineInstances',
+ 'PipelineTemplates',
+    'Specimens',
+    'Traits',
+])
+_DEPRECATED_SCHEMAS = frozenset([
+ *(name[:-1] for name in _DEPRECATED_RESOURCES),
+ *(f'{name[:-1]}List' for name in _DEPRECATED_RESOURCES),
+])
+
+_LIST_PYDOC = '''
+
+This is the dictionary object returned when you call `{cls_name}s.list`.
+If you just want to iterate all objects that match your search criteria,
+consider using `arvados.util.keyset_list_all`.
+If you work with this raw object, the keys of the dictionary are documented
+below, along with their types. The `items` key maps to a list of matching
+`{cls_name}` objects.
+'''
+_MODULE_PYDOC = '''Arvados API client reference documentation
+
+This module provides reference documentation for the interface of the
+Arvados API client, including method signatures and type information for
+returned objects. However, the functions in `arvados.api` will return
+different classes at runtime that are generated dynamically from the Arvados
+API discovery document. The classes in this module do not have any
+implementation, and you should not instantiate them in your code.
+
+If you're just starting out, `ArvadosAPIClient` documents the methods
+available from the client object. From there, you can follow the trail into
+resource methods, request objects, and finally the data dictionaries returned
+by the API server.
+'''
+_SCHEMA_PYDOC = '''
+
+This is the dictionary object that represents a single {cls_name} in Arvados
+and is returned by most `{cls_name}s` methods.
+The keys of the dictionary are documented below, along with their types.
+Not every key may appear in every dictionary returned by an API call.
+When a method doesn't return all the data, you can use its `select` parameter
+to list the specific keys you need. Refer to the API documentation for details.
+'''
+
+_MODULE_PRELUDE = '''
+import googleapiclient.discovery
+import googleapiclient.http
+import httplib2
+import sys
+from typing import Any, Dict, Generic, List, Optional, TypeVar
+if sys.version_info < (3, 8):
+ from typing_extensions import TypedDict
+else:
+ from typing import TypedDict
+
+# ST represents an API response type
+ST = TypeVar('ST', bound=TypedDict)
+'''
+_REQUEST_CLASS = '''
+class ArvadosAPIRequest(googleapiclient.http.HttpRequest, Generic[ST]):
+ """Generic API request object
+
+ When you call an API method in the Arvados Python SDK, it returns a
+ request object. You usually call `execute()` on this object to submit the
+ request to your Arvados API server and retrieve the response. `execute()`
+ will return the type of object annotated in the subscript of
+ `ArvadosAPIRequest`.
+ """
+
+ def execute(self, http: Optional[httplib2.Http]=None, num_retries: int=0) -> ST:
+ """Execute this request and return the response
+
+ Arguments:
+
+ * http: httplib2.Http | None --- The HTTP client object to use to
+ execute the request. If not specified, uses the HTTP client object
+ created with the API client object.
+
+ * num_retries: int --- The maximum number of times to retry this
+ request if the server returns a retryable failure. The API client
+ object also has a maximum number of retries specified when it is
+ instantiated (see `arvados.api.api_client`). This request is run
+ with the larger of that number and this argument. Default 0.
+ """
+
+'''
+
+# Annotation represents a valid Python type annotation. Future development
+# could expand this to include other valid types like `type`.
+Annotation = str
+_TYPE_MAP: Mapping[str, Annotation] = {
+ # Map the API's JavaScript-based type names to Python annotations.
+ # Some of these may disappear after Arvados issue #19795 is fixed.
+ 'Array': 'List',
+ 'array': 'List',
+ 'boolean': 'bool',
+ # datetime fields are strings in ISO 8601 format.
+ 'datetime': 'str',
+ 'Hash': 'Dict[str, Any]',
+ 'integer': 'int',
+ 'object': 'Dict[str, Any]',
+ 'string': 'str',
+ 'text': 'str',
+}
+
+def get_type_annotation(name: str) -> str:
+ return _TYPE_MAP.get(name, name)
+
+def to_docstring(s: str, indent: int) -> str:
+ prefix = ' ' * indent
+ s = s.replace('"""', '""\"')
+ s = re.sub(r'(\n+)', r'\1' + prefix, s)
+ s = s.strip()
+ if '\n' in s:
+ return f'{prefix}"""{s}\n{prefix}"""'
+ else:
+ return f'{prefix}"""{s}"""'
+
+def transform_name(s: str, sep: str, fix_part: Callable[[str], str]) -> str:
+ return sep.join(fix_part(part) for part in s.split('_'))
+
+def classify_name(s: str) -> str:
+ return transform_name(s, '', TITLECASE)
+
+def humanize_name(s: str) -> str:
+ return transform_name(s, ' ', LOWERCASE)
+
+class Parameter(inspect.Parameter):
+ def __init__(self, name: str, spec: Mapping[str, Any]) -> None:
+ self.api_name = name
+ self._spec = spec
+ if keyword.iskeyword(name):
+ name += '_'
+ super().__init__(
+ name,
+ inspect.Parameter.KEYWORD_ONLY,
+ annotation=get_type_annotation(self._spec['type']),
+ # In normal Python the presence of a default tells you whether or
+ # not an argument is required. In the API the `required` flag tells
+ # us that, and defaults are specified inconsistently. Don't show
+ # defaults in the signature: it adds noise and makes things more
+ # confusing for the reader about what's required and what's
+ # optional. The docstring can explain in better detail, including
+ # the default value.
+ default=inspect.Parameter.empty,
+ )
+
+ def default_value(self) -> object:
+ try:
+ src_value: str = self._spec['default']
+ except KeyError:
+ return None
+ if src_value == 'true':
+ return True
+ elif src_value == 'false':
+ return False
+ elif src_value.isdigit():
+ return int(src_value)
+ else:
+ return src_value
+
+ def is_required(self) -> bool:
+ return self._spec['required']
+
+ def doc(self) -> str:
+ default_value = self.default_value()
+ if default_value is None:
+ default_doc = ''
+ else:
+ default_doc = f"Default {default_value!r}."
+ description = self._spec['description']
+ doc_parts = [f'{self.api_name}: {self.annotation}']
+ if description or default_doc:
+ doc_parts.append('---')
+ if description:
+ doc_parts.append(description)
+ if default_doc:
+ doc_parts.append(default_doc)
+ return f'''
+* {' '.join(doc_parts)}
+'''
+
+
+class Method:
+ def __init__(
+ self,
+ name: str,
+ spec: Mapping[str, Any],
+ annotate: Callable[[Annotation], Annotation]=str,
+ ) -> None:
+ self.name = name
+ self._spec = spec
+ self._annotate = annotate
+ self._required_params = []
+ self._optional_params = []
+ for param_name, param_spec in spec['parameters'].items():
+ param = Parameter(param_name, param_spec)
+ if param.is_required():
+ param_list = self._required_params
+ else:
+ param_list = self._optional_params
+ param_list.append(param)
+ self._required_params.sort(key=NAME_KEY)
+ self._optional_params.sort(key=NAME_KEY)
+
+ def signature(self) -> inspect.Signature:
+ parameters = [
+ inspect.Parameter('self', inspect.Parameter.POSITIONAL_OR_KEYWORD),
+ *self._required_params,
+ *self._optional_params,
+ ]
+ try:
+ returns = get_type_annotation(self._spec['response']['$ref'])
+ except KeyError:
+ returns = 'Dict[str, Any]'
+ returns = self._annotate(returns)
+ return inspect.Signature(parameters, return_annotation=returns)
+
+ def doc(self, doc_slice: slice=slice(None)) -> str:
+ doc_lines = self._spec['description'].splitlines(keepends=True)[doc_slice]
+ if not doc_lines[-1].endswith('\n'):
+ doc_lines.append('\n')
+ if self._required_params:
+ doc_lines.append("\nRequired parameters:\n")
+ doc_lines.extend(param.doc() for param in self._required_params)
+ if self._optional_params:
+ doc_lines.append("\nOptional parameters:\n")
+ doc_lines.extend(param.doc() for param in self._optional_params)
+ return f'''
+ def {self.name}{self.signature()}:
+{to_docstring(''.join(doc_lines), 8)}
+'''
+
+
+def document_schema(name: str, spec: Mapping[str, Any]) -> str:
+ description = spec['description']
+ if name in _DEPRECATED_SCHEMAS:
+ description += _DEPRECATED_NOTICE
+ if name.endswith('List'):
+ desc_fmt = _LIST_PYDOC
+ cls_name = name[:-4]
+ else:
+ desc_fmt = _SCHEMA_PYDOC
+ cls_name = name
+ description += desc_fmt.format(cls_name=cls_name)
+ lines = [
+ f"class {name}(TypedDict, total=False):",
+ to_docstring(description, 4),
+ ]
+ for field_name, field_spec in spec['properties'].items():
+ field_type = get_type_annotation(field_spec['type'])
+ try:
+ subtype = field_spec['items']['$ref']
+ except KeyError:
+ pass
+ else:
+ field_type += f"[{get_type_annotation(subtype)}]"
+
+ field_line = f" {field_name}: {field_type!r}"
+ try:
+ field_line += f" = {field_spec['default']!r}"
+ except KeyError:
+ pass
+ lines.append(field_line)
+
+ field_doc: str = field_spec.get('description', '')
+ if field_spec['type'] == 'datetime':
+ field_doc += "\n\nString in ISO 8601 datetime format. Pass it to `ciso8601.parse_datetime` to build a `datetime.datetime`."
+ if field_doc:
+ lines.append(to_docstring(field_doc, 4))
+ lines.append('\n')
+ return '\n'.join(lines)
+
+def document_resource(name: str, spec: Mapping[str, Any]) -> str:
+ class_name = classify_name(name)
+ docstring = f"Methods to query and manipulate Arvados {humanize_name(name)}"
+ if class_name in _DEPRECATED_RESOURCES:
+ docstring += _DEPRECATED_NOTICE
+ methods = [
+ Method(key, meth_spec, 'ArvadosAPIRequest[{}]'.format)
+ for key, meth_spec in spec['methods'].items()
+ if key not in _ALIASED_METHODS
+ ]
+ return f'''class {class_name}:
+{to_docstring(docstring, 4)}
+{''.join(method.doc(slice(1)) for method in sorted(methods, key=NAME_KEY))}
+'''
+
+def parse_arguments(arglist: Optional[Sequence[str]]) -> argparse.Namespace:
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--output-file', '-O',
+ type=pathlib.Path,
+ metavar='PATH',
+ default=STDSTREAM_PATH,
+ help="""Path to write output. Specify `-` to use stdout (the default)
+""")
+ parser.add_argument(
+ 'discovery_url',
+ nargs=argparse.OPTIONAL,
+ metavar='URL',
+ help="""URL or file path of a discovery document to load.
+Specify `-` to use stdin.
+If not provided, retrieved dynamically from Arvados client configuration.
+""")
+ args = parser.parse_args(arglist)
+ if args.discovery_url is None:
+ from arvados.api import api_kwargs_from_config
+ discovery_fmt = api_kwargs_from_config('v1')['discoveryServiceUrl']
+ args.discovery_url = discovery_fmt.format(api='arvados', apiVersion='v1')
+ elif args.discovery_url == '-':
+ args.discovery_url = 'file:///dev/stdin'
+ else:
+ parts = urllib.parse.urlsplit(args.discovery_url)
+ if not (parts.scheme or parts.netloc):
+ args.discovery_url = pathlib.Path(args.discovery_url).resolve().as_uri()
+ # Our output is Python source, so it should be UTF-8 regardless of locale.
+ if args.output_file == STDSTREAM_PATH:
+ args.out_file = open(sys.stdout.fileno(), 'w', encoding='utf-8', closefd=False)
+ else:
+ args.out_file = args.output_file.open('w', encoding='utf-8')
+ return args
+
+def main(arglist: Optional[Sequence[str]]=None) -> int:
+ args = parse_arguments(arglist)
+ with urllib.request.urlopen(args.discovery_url) as discovery_file:
+ status = discovery_file.getcode()
+ if not (status is None or 200 <= status < 300):
+ print(
+                f"error getting {args.discovery_url}: server returned {status}",
+ file=sys.stderr,
+ )
+ return os.EX_IOERR
+ discovery_document = json.load(discovery_file)
+ print(
+ to_docstring(_MODULE_PYDOC, indent=0),
+ _MODULE_PRELUDE,
+ sep='\n', file=args.out_file,
+ )
+
+ schemas = sorted(discovery_document['schemas'].items())
+ for name, schema_spec in schemas:
+ print(document_schema(name, schema_spec), file=args.out_file)
+
+ resources = sorted(discovery_document['resources'].items())
+ for name, resource_spec in resources:
+ print(document_resource(name, resource_spec), file=args.out_file)
+
+ print(
+ _REQUEST_CLASS,
+ '''class ArvadosAPIClient(googleapiclient.discovery.Resource):''',
+ sep='\n', file=args.out_file,
+ )
+ for name, _ in resources:
+ class_name = classify_name(name)
+ docstring = f"Return an instance of `{class_name}` to call methods via this client"
+ if class_name in _DEPRECATED_RESOURCES:
+ docstring += _DEPRECATED_NOTICE
+ method_spec = {
+ 'description': docstring,
+ 'parameters': {},
+ 'response': {
+ '$ref': class_name,
+ },
+ }
+ print(Method(name, method_spec).doc(), file=args.out_file)
+
+ args.out_file.close()
+ return os.EX_OK
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
index d28df09987..a2ec703556 100644
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -8,18 +8,78 @@ import os
import sys
import re
+from pathlib import Path
from setuptools import setup, find_packages
-
-SETUP_DIR = os.path.dirname(__file__) or '.'
-README = os.path.join(SETUP_DIR, 'README.rst')
+from setuptools.command import build_py
import arvados_version
-version = arvados_version.get_version(SETUP_DIR, "arvados")
+version = arvados_version.get_version()
+short_tests_only = arvados_version.short_tests_only()
+README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
+
+class BuildPython(build_py.build_py):
+ """Extend setuptools `build_py` to generate API documentation
+
+ This class implements a setuptools subcommand, so it follows
+ [the SubCommand protocol][1]. Most of these methods are required by that
+ protocol, except `should_run`, which we register as the subcommand
+ predicate.
+
+ [1]: https://setuptools.pypa.io/en/latest/userguide/extension.html#setuptools.command.build.SubCommand
+ """
+ # This is implemented as functionality on top of `build_py`, rather than a
+ # dedicated subcommand, because that's the only way I can find to run this
+ # code during both `build` and `install`. setuptools' `install` command
+ # normally calls specific `build` subcommands directly, rather than calling
+ # the entire command, so it skips custom subcommands.
+ user_options = build_py.build_py.user_options + [
+ ('discovery-json=', 'J', 'JSON discovery document used to build pydoc'),
+ ('discovery-output=', 'O', 'relative path to write discovery document pydoc'),
+ ]
+
+ def initialize_options(self):
+ super().initialize_options()
+ self.discovery_json = 'arvados-v1-discovery.json'
+ self.discovery_output = str(Path('arvados', 'api_resources.py'))
+
+ def _relative_path(self, src, optname):
+ retval = Path(src)
+ if retval.is_absolute():
+ raise Exception(f"--{optname} should be a relative path")
+ else:
+ return retval
+
+ def finalize_options(self):
+ super().finalize_options()
+ self.json_path = self._relative_path(self.discovery_json, 'discovery-json')
+ self.out_path = Path(
+ self.build_lib,
+ self._relative_path(self.discovery_output, 'discovery-output'),
+ )
+
+ def run(self):
+ super().run()
+ import discovery2pydoc
+ arglist = ['--output-file', str(self.out_path), str(self.json_path)]
+ returncode = discovery2pydoc.main(arglist)
+ if returncode != 0:
+ raise Exception(f"discovery2pydoc exited {returncode}")
+
+ def get_outputs(self):
+ retval = super().get_outputs()
+ retval.append(str(self.out_path))
+ return retval
+
+ def get_source_files(self):
+ retval = super().get_source_files()
+ retval.append(str(self.json_path))
+ return retval
+
+ def get_output_mapping(self):
+ retval = super().get_output_mapping()
+ retval[str(self.json_path)] = str(self.out_path)
+ return retval
-short_tests_only = False
-if '--short-tests-only' in sys.argv:
- short_tests_only = True
- sys.argv.remove('--short-tests-only')
setup(name='arvados-python-client',
version=version,
@@ -30,6 +90,9 @@ setup(name='arvados-python-client',
url="https://arvados.org",
download_url="https://github.com/arvados/arvados.git",
license='Apache 2.0',
+ cmdclass={
+ 'build_py': BuildPython,
+ },
packages=find_packages(),
scripts=[
'bin/arv-copy',
@@ -46,21 +109,21 @@ setup(name='arvados-python-client',
('share/doc/arvados-python-client', ['LICENSE-2.0.txt', 'README.rst']),
],
install_requires=[
+ *arvados_version.iter_dependencies(version),
'ciso8601 >=2.0.0',
'future',
- 'google-api-python-client >=1.6.2, <2',
- 'google-auth<2',
- 'httplib2 >=0.9.2, <0.20.2',
- 'pycurl >=7.19.5.1, <7.45.0',
- 'ruamel.yaml >=0.15.54, <0.17.11',
- 'setuptools',
- 'ws4py >=0.4.2',
- 'protobuf<4.0.0dev'
+ 'google-api-python-client >=2.1.0',
+ 'google-auth',
+ 'httplib2 >=0.9.2',
+ 'pycurl >=7.19.5.1',
+ 'setuptools >=40.3.0',
+ 'websockets >=11.0',
],
+ python_requires="~=3.8",
classifiers=[
'Programming Language :: Python :: 3',
],
test_suite='tests',
- tests_require=['pbr<1.7.0', 'mock>=1.0,<4', 'PyYAML'],
+ tests_require=['PyYAML', 'parameterized'],
zip_safe=False
)
diff --git a/sdk/python/tests/arvados_testutil.py b/sdk/python/tests/arvados_testutil.py
index d9b3ca86c4..6d58b23360 100644
--- a/sdk/python/tests/arvados_testutil.py
+++ b/sdk/python/tests/arvados_testutil.py
@@ -4,9 +4,6 @@
from future import standard_library
standard_library.install_aliases()
-from builtins import str
-from builtins import range
-from builtins import object
import arvados
import contextlib
import errno
@@ -14,7 +11,6 @@ import hashlib
import http.client
import httplib2
import io
-import mock
import os
import pycurl
import queue
@@ -23,11 +19,8 @@ import sys
import tempfile
import unittest
-if sys.version_info >= (3, 0):
- from io import StringIO, BytesIO
-else:
- from cStringIO import StringIO
- BytesIO = StringIO
+from io import StringIO, BytesIO
+from unittest import mock
# Use this hostname when you want to make sure the traffic will be
# instantly refused. 100::/64 is a dedicated black hole.
@@ -60,10 +53,10 @@ def mock_responses(body, *codes, **headers):
return mock.patch('httplib2.Http.request', side_effect=queue_with((
(fake_httplib2_response(code, **headers), body) for code in codes)))
-def mock_api_responses(api_client, body, codes, headers={}):
+def mock_api_responses(api_client, body, codes, headers={}, method='request'):
if not isinstance(body, bytes) and hasattr(body, 'encode'):
body = body.encode()
- return mock.patch.object(api_client._http, 'request', side_effect=queue_with((
+ return mock.patch.object(api_client._http, method, side_effect=queue_with((
(fake_httplib2_response(code, **headers), body) for code in codes)))
def str_keep_locator(s):
@@ -280,3 +273,30 @@ if sys.version_info < (3, 0):
return self.assertNotRegexpMatches(*args, **kwargs)
unittest.TestCase.assertRegex = assertRegex
unittest.TestCase.assertNotRegex = assertNotRegex
+
+def binary_compare(a, b):
+ if len(a) != len(b):
+ return False
+ for i in range(0, len(a)):
+ if a[i] != b[i]:
+ return False
+ return True
+
+def make_block_cache(disk_cache):
+ if disk_cache:
+ disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
+ shutil.rmtree(disk_cache_dir, ignore_errors=True)
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache)
+ return block_cache
+
+
+class DiskCacheBase:
+ def make_block_cache(self, disk_cache):
+ self.disk_cache_dir = tempfile.mkdtemp() if disk_cache else None
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache,
+ disk_cache_dir=self.disk_cache_dir)
+ return block_cache
+
+ def tearDown(self):
+ if self.disk_cache_dir:
+ shutil.rmtree(self.disk_cache_dir)
diff --git a/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar b/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar
new file mode 100644
index 0000000000..a4b3d86390
Binary files /dev/null and b/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar differ
diff --git a/sdk/python/tests/data/hello-world-ManifestV2.tar b/sdk/python/tests/data/hello-world-ManifestV2.tar
new file mode 100644
index 0000000000..b98e7c7acd
Binary files /dev/null and b/sdk/python/tests/data/hello-world-ManifestV2.tar differ
diff --git a/sdk/python/tests/data/hello-world-README.txt b/sdk/python/tests/data/hello-world-README.txt
new file mode 100644
index 0000000000..8c6a7de31e
--- /dev/null
+++ b/sdk/python/tests/data/hello-world-README.txt
@@ -0,0 +1,25 @@
+The hello-world-*.tar files are archived from the official Docker
+hello-world:latest image available on 2024-02-01,
+sha256:d2c94e258dcb3c5ac2798d32e1249e42ef01cba4841c2234249495f87264ac5a.
+
+
+Copyright (c) 2014 Docker, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/sdk/python/tests/fed-migrate/jenkins.sh b/sdk/python/tests/fed-migrate/jenkins.sh
index e5dd8aa913..46981e5016 100755
--- a/sdk/python/tests/fed-migrate/jenkins.sh
+++ b/sdk/python/tests/fed-migrate/jenkins.sh
@@ -1,4 +1,7 @@
#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
if test -z "$WORKSPACE" ; then
echo "WORKSPACE unset"
@@ -11,20 +14,13 @@ docker rm fedbox1-data fedbox2-data fedbox3-data
set -ex
-mkdir -p $WORKSPACE/tmp
-cd $WORKSPACE/tmp
-virtualenv --python python3 venv3
-. venv3/bin/activate
-
-cd $WORKSPACE/sdk/python
-pip install -e .
-
-cd $WORKSPACE/sdk/cwl
-pip install -e .
+mkdir -p "$WORKSPACE/tmp/arvbox"
+python3 -m venv "$WORKSPACE/tmp/venv3"
+"$WORKSPACE/tmp/venv3/bin/pip" install -e "$WORKSPACE/sdk/python" "$WORKSPACE/sdk/cwl"
+cwltool() { "$WORKSPACE/tmp/venv3/bin/cwltool" "$@"; }
export PATH=$PATH:$WORKSPACE/tools/arvbox/bin
-mkdir -p $WORKSPACE/tmp/arvbox
cd $WORKSPACE/sdk/python/tests/fed-migrate
cwltool arvbox-make-federation.cwl \
--arvbox_base $WORKSPACE/tmp/arvbox \
diff --git a/sdk/python/tests/nginx.conf b/sdk/python/tests/nginx.conf
index 4ad3eda420..446b95ca42 100644
--- a/sdk/python/tests/nginx.conf
+++ b/sdk/python/tests/nginx.conf
@@ -15,6 +15,13 @@ http {
fastcgi_temp_path "{{TMPDIR}}";
uwsgi_temp_path "{{TMPDIR}}";
scgi_temp_path "{{TMPDIR}}";
+ geo $external_client {
+ default 1;
+ 127.0.0.0/8 0;
+ ::1 0;
+ fd00::/8 0;
+ {{INTERNALSUBNETS}}
+ }
upstream controller {
server {{UPSTREAMHOST}}:{{CONTROLLERPORT}};
}
@@ -26,7 +33,10 @@ http {
client_max_body_size 0;
location / {
proxy_pass http://controller;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
+ proxy_set_header X-External-Client $external_client;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_redirect off;
@@ -150,30 +160,73 @@ http {
proxy_request_buffering off;
}
}
- upstream workbench1 {
- server {{UPSTREAMHOST}}:{{WORKBENCH1PORT}};
- }
- server {
- listen {{LISTENHOST}}:{{WORKBENCH1SSLPORT}} ssl;
- server_name workbench1 workbench1.* workbench.*;
- ssl_certificate "{{SSLCERT}}";
- ssl_certificate_key "{{SSLKEY}}";
- location / {
- proxy_pass http://workbench1;
- proxy_set_header Host $http_host;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto https;
- proxy_redirect off;
- }
+ # wb1->wb2 redirects copied from
+ # /tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls
+ map $request_uri $wb1_redirect {
+ default 0;
+
+ ~^/actions\?uuid=(.*-4zz18-.*) /collections/$1;
+ ~^/actions\?uuid=(.*-j7d0g-.*) /projects/$1;
+ ~^/actions\?uuid=(.*-tpzed-.*) /projects/$1;
+ ~^/actions\?uuid=(.*-7fd4e-.*) /workflows/$1;
+ ~^/actions\?uuid=(.*-xvhdp-.*) /processes/$1;
+ ~^/actions\?uuid=(.*) /;
+
+      ~^/work_units/(.*) /processes/$1;
+      ~^/container_requests/(.*) /processes/$1;
+      ~^/users/(.*) /user/$1;
+      ~^/groups/(.*) /group/$1;
+
+      ~^/virtual_machines.* /virtual-machines-admin;
+      ~^/users/.*/virtual_machines /virtual-machines-user;
+      ~^/authorized_keys.* /ssh-keys-admin;
+      ~^/users/.*/ssh_keys /ssh-keys-user;
+      ~^/containers.* /all_processes;
+      ~^/container_requests /all_processes;
+      ~^/job.* /all_processes;
+      ~^/users/link_account /link_account;
+      ~^/keep_services.* /keep-services;
+      ~^/trash_items.* /trash;
+
+      ~^/themes.* /;
+      ~^/keep_disks.* /;
+      ~^/user_agreements.* /;
+      ~^/nodes.* /;
+      ~^/humans.* /;
+      ~^/traits.* /;
+      ~^/sessions.* /;
+      ~^/logout.* /;
+      ~^/logged_out.* /;
+      ~^/current_token /;
+      ~^/logs.* /;
+      ~^/factory_jobs.* /;
+      ~^/uploaded_datasets.* /;
+      ~^/specimens.* /;
+      ~^/pipeline_templates.* /;
+      ~^/pipeline_instances.* /;
}
upstream workbench2 {
server {{UPSTREAMHOST}}:{{WORKBENCH2PORT}};
}
server {
listen {{LISTENHOST}}:{{WORKBENCH2SSLPORT}} ssl;
- server_name workbench2 workbench2.*;
+ listen {{LISTENHOST}}:{{WORKBENCH1SSLPORT}} ssl;
+ server_name workbench2 workbench2.* workbench1 workbench1.* workbench workbench.*;
ssl_certificate "{{SSLCERT}}";
ssl_certificate_key "{{SSLKEY}}";
+
+ if ($wb1_redirect) {
+ return 301 $wb1_redirect;
+ }
+
+ # file download redirects
+ if ($arg_disposition = attachment) {
+ rewrite ^/collections/([^/]*)/(.*) /?redirectToDownload=/c=$1/$2? redirect;
+ }
+ if ($arg_disposition = inline) {
+ rewrite ^/collections/([^/]*)/(.*) /?redirectToPreview=/c=$1/$2? redirect;
+ }
+
location / {
proxy_pass http://workbench2;
proxy_set_header Host $http_host;
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index e32d385f73..787837b723 100644
--- a/sdk/python/tests/run_test_server.py
+++ b/sdk/python/tests/run_test_server.py
@@ -2,23 +2,18 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import division
-from builtins import str
-from builtins import range
import argparse
import atexit
import errno
import glob
import httplib2
import os
-import pipes
import random
import re
+import shlex
import shutil
import signal
import socket
-import string
import subprocess
import sys
import tempfile
@@ -26,10 +21,7 @@ import time
import unittest
import yaml
-try:
- from urllib.parse import urlparse
-except ImportError:
- from urlparse import urlparse
+from urllib.parse import urlparse
MY_DIRNAME = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__' and os.path.exists(
@@ -41,6 +33,15 @@ if __name__ == '__main__' and os.path.exists(
import arvados
import arvados.config
+# This module starts subprocesses and records them in pidfiles so they
+# can be managed by other processes (incl. after this process
+# exits). But if we don't keep a reference to each subprocess object
+# somewhere, the subprocess destructor runs, and we get a lot of
+# ResourceWarning noise in test logs. This is our bucket of subprocess
+# objects whose destructors we don't want to run but are otherwise
+# unneeded.
+_detachedSubprocesses = []
+
ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
SERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')
@@ -248,14 +249,17 @@ def _logfilename(label):
stdbuf+['cat', fifo],
stdin=open('/dev/null'),
stdout=subprocess.PIPE)
+ _detachedSubprocesses.append(cat)
tee = subprocess.Popen(
stdbuf+['tee', '-a', logfilename],
stdin=cat.stdout,
stdout=subprocess.PIPE)
- subprocess.Popen(
+ _detachedSubprocesses.append(tee)
+ sed = subprocess.Popen(
stdbuf+['sed', '-e', 's/^/['+label+'] /'],
stdin=tee.stdout,
stdout=sys.stderr)
+ _detachedSubprocesses.append(sed)
return fifo
def run(leave_running_atexit=False):
@@ -338,7 +342,7 @@ def run(leave_running_atexit=False):
resdir = subprocess.check_output(['bundle', 'exec', 'passenger-config', 'about', 'resourcesdir']).decode().rstrip()
with open(resdir + '/templates/standalone/config.erb') as f:
template = f.read()
- newtemplate = re.sub('http {', 'http {\n passenger_stat_throttle_rate 0;', template)
+ newtemplate = re.sub(r'http \{', 'http {\n passenger_stat_throttle_rate 0;', template)
if newtemplate == template:
raise "template edit failed"
with open('tmp/passenger-nginx.conf.erb', 'w') as f:
@@ -367,6 +371,7 @@ def run(leave_running_atexit=False):
'--ssl-certificate', 'tmp/self-signed.pem',
'--ssl-certificate-key', 'tmp/self-signed.key'],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+ _detachedSubprocesses.append(railsapi)
if not leave_running_atexit:
atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)
@@ -444,6 +449,7 @@ def run_controller():
controller = subprocess.Popen(
["arvados-server", "controller"],
stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ _detachedSubprocesses.append(controller)
with open(_pidfile('controller'), 'w') as f:
f.write(str(controller.pid))
_wait_until_port_listens(port)
@@ -463,6 +469,7 @@ def run_ws():
ws = subprocess.Popen(
["arvados-server", "ws"],
stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ _detachedSubprocesses.append(ws)
with open(_pidfile('ws'), 'w') as f:
f.write(str(ws.pid))
_wait_until_port_listens(port)
@@ -496,6 +503,7 @@ def _start_keep(n, blob_signing=False):
with open('/dev/null') as _stdin:
child = subprocess.Popen(
keep_cmd, stdin=_stdin, stdout=logf, stderr=logf, close_fds=True)
+ _detachedSubprocesses.append(child)
print('child.pid is %d'%child.pid, file=sys.stderr)
with open(_pidfile('keep{}'.format(n)), 'w') as f:
@@ -562,6 +570,7 @@ def run_keep_proxy():
logf = open(_logfilename('keepproxy'), WRITE_MODE)
kp = subprocess.Popen(
['arvados-server', 'keepproxy'], env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ _detachedSubprocesses.append(kp)
with open(_pidfile('keepproxy'), 'w') as f:
f.write(str(kp.pid))
@@ -601,6 +610,7 @@ def run_arv_git_httpd():
logf = open(_logfilename('githttpd'), WRITE_MODE)
agh = subprocess.Popen(['arvados-server', 'git-httpd'],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+ _detachedSubprocesses.append(agh)
with open(_pidfile('githttpd'), 'w') as f:
f.write(str(agh.pid))
_wait_until_port_listens(gitport)
@@ -621,6 +631,7 @@ def run_keep_web():
keepweb = subprocess.Popen(
['arvados-server', 'keep-web'],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+ _detachedSubprocesses.append(keepweb)
with open(_pidfile('keep-web'), 'w') as f:
f.write(str(keepweb.pid))
_wait_until_port_listens(keepwebport)
@@ -635,8 +646,8 @@ def run_nginx():
return
stop_nginx()
nginxconf = {}
- nginxconf['UPSTREAMHOST'] = 'localhost'
- nginxconf['LISTENHOST'] = 'localhost'
+ nginxconf['UPSTREAMHOST'] = '127.0.0.1'
+ nginxconf['LISTENHOST'] = '127.0.0.1'
nginxconf['CONTROLLERPORT'] = internal_port_from_config("Controller")
nginxconf['ARVADOS_API_HOST'] = "0.0.0.0:" + str(external_port_from_config("Controller"))
nginxconf['CONTROLLERSSLPORT'] = external_port_from_config("Controller")
@@ -651,7 +662,6 @@ def run_nginx():
nginxconf['HEALTHSSLPORT'] = external_port_from_config("Health")
nginxconf['WSPORT'] = internal_port_from_config("Websocket")
nginxconf['WSSSLPORT'] = external_port_from_config("Websocket")
- nginxconf['WORKBENCH1PORT'] = internal_port_from_config("Workbench1")
nginxconf['WORKBENCH1SSLPORT'] = external_port_from_config("Workbench1")
nginxconf['WORKBENCH2PORT'] = internal_port_from_config("Workbench2")
nginxconf['WORKBENCH2SSLPORT'] = external_port_from_config("Workbench2")
@@ -660,6 +670,7 @@ def run_nginx():
nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
nginxconf['ERRORLOG'] = _logfilename('nginx_error')
nginxconf['TMPDIR'] = TEST_TMPDIR + '/nginx'
+ nginxconf['INTERNALSUBNETS'] = '169.254.0.0/16 0;'
conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
@@ -677,6 +688,7 @@ def run_nginx():
'-g', 'error_log stderr info; pid '+_pidfile('nginx')+';',
'-c', conffile],
env=env, stdin=open('/dev/null'), stdout=sys.stderr)
+ _detachedSubprocesses.append(nginx)
_wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'])
def setup_config():
@@ -685,7 +697,6 @@ def setup_config():
controller_external_port = find_available_port()
websocket_port = find_available_port()
websocket_external_port = find_available_port()
- workbench1_port = find_available_port()
workbench1_external_port = find_available_port()
workbench2_port = find_available_port()
workbench2_external_port = find_available_port()
@@ -737,9 +748,6 @@ def setup_config():
},
"Workbench1": {
"ExternalURL": "https://%s:%s/" % (localhost, workbench1_external_port),
- "InternalURLs": {
- "http://%s:%s"%(localhost, workbench1_port): {},
- },
},
"Workbench2": {
"ExternalURL": "https://%s:%s/" % (localhost, workbench2_external_port),
@@ -791,6 +799,7 @@ def setup_config():
"SystemRootToken": auth_token('system_user'),
"API": {
"RequestTimeout": "30s",
+ "LockBeforeUpdate": True,
},
"Login": {
"Test": {
@@ -832,6 +841,9 @@ def setup_config():
"GitInternalDir": os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'internal.git'),
},
"LocalKeepBlobBuffersPerVCPU": 0,
+ "Logging": {
+ "SweepInterval": 0, # disable, otherwise test cases can't acquire dblock
+ },
"SupportedDockerImageFormats": {"v1": {}},
"ShellAccess": {
"Admin": True,
@@ -925,7 +937,6 @@ class TestCaseWithServers(unittest.TestCase):
cls._orig_config = arvados.config.settings().copy()
cls._cleanup_funcs = []
os.environ.pop('ARVADOS_KEEP_SERVICES', None)
- os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
for server_kwargs, start_func, stop_func in (
(cls.MAIN_SERVER, run, reset),
(cls.WS_SERVER, run_ws, stop_ws),
@@ -985,8 +996,8 @@ if __name__ == "__main__":
host = os.environ['ARVADOS_API_HOST']
if args.auth is not None:
token = auth_token(args.auth)
- print("export ARVADOS_API_TOKEN={}".format(pipes.quote(token)))
- print("export ARVADOS_API_HOST={}".format(pipes.quote(host)))
+ print("export ARVADOS_API_TOKEN={}".format(shlex.quote(token)))
+ print("export ARVADOS_API_HOST={}".format(shlex.quote(host)))
print("export ARVADOS_API_HOST_INSECURE=true")
else:
print(host)
diff --git a/sdk/python/tests/test_api.py b/sdk/python/tests/test_api.py
index c249f46d3c..2768d2e7cf 100644
--- a/sdk/python/tests/test_api.py
+++ b/sdk/python/tests/test_api.py
@@ -7,22 +7,33 @@ from builtins import str
from builtins import range
import arvados
import collections
+import contextlib
import httplib2
import itertools
import json
+import logging
import mimetypes
import os
import socket
import string
+import sys
import unittest
+import urllib.parse as urlparse
-import mock
+from unittest import mock
from . import run_test_server
from apiclient import errors as apiclient_errors
from apiclient import http as apiclient_http
-from arvados.api import OrderedJsonModel, RETRY_DELAY_INITIAL, RETRY_DELAY_BACKOFF, RETRY_COUNT
-from .arvados_testutil import fake_httplib2_response, queue_with
+from arvados.api import (
+ api_client,
+ normalize_api_kwargs,
+ api_kwargs_from_config,
+ OrderedJsonModel,
+ _googleapiclient_log_lock,
+)
+from .arvados_testutil import fake_httplib2_response, mock_api_responses, queue_with
+import httplib2.error
if not mimetypes.inited:
mimetypes.init()
@@ -30,12 +41,30 @@ if not mimetypes.inited:
class ArvadosApiTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
ERROR_HEADERS = {'Content-Type': mimetypes.types_map['.json']}
+ RETRIED_4XX = frozenset([408, 409, 423])
def api_error_response(self, code, *errors):
return (fake_httplib2_response(code, **self.ERROR_HEADERS),
json.dumps({'errors': errors,
'error_token': '1234567890+12345678'}).encode())
+ def _config_from_environ(self):
+ return {
+ key: value
+ for key, value in os.environ.items()
+ if key.startswith('ARVADOS_API_')
+ }
+
+ def _discoveryServiceUrl(
+ self,
+ host=None,
+ path='/discovery/v1/apis/{api}/{apiVersion}/rest',
+ scheme='https',
+ ):
+ if host is None:
+ host = os.environ['ARVADOS_API_HOST']
+ return urlparse.urlunsplit((scheme, host, path, None, None))
+
def test_new_api_objects_with_cache(self):
clients = [arvados.api('v1', cache=True) for index in [0, 1]]
self.assertIsNot(*clients)
@@ -124,6 +153,57 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
self.assertEqual(api._http.timeout, 1234,
"Requested timeout value was 1234")
+ def test_4xx_retried(self):
+ client = arvados.api('v1')
+ for code in self.RETRIED_4XX:
+ name = f'retried #{code}'
+ with self.subTest(name), mock.patch('time.sleep'):
+ expected = {'username': name}
+ with mock_api_responses(
+ client,
+ json.dumps(expected),
+ [code, code, 200],
+ self.ERROR_HEADERS,
+ 'orig_http_request',
+ ):
+ actual = client.users().current().execute()
+ self.assertEqual(actual, expected)
+
+ def test_4xx_not_retried(self):
+ client = arvados.api('v1', num_retries=3)
+ # Note that googleapiclient does retry 403 *if* the response JSON
+ # includes flags that say the request was denied by rate limiting.
+ # An empty JSON response like we use here should not be retried.
+ for code in [400, 401, 403, 404, 422]:
+ with self.subTest(f'error {code}'), mock.patch('time.sleep'):
+ with mock_api_responses(
+ client,
+ b'{}',
+ [code, 200],
+ self.ERROR_HEADERS,
+ 'orig_http_request',
+ ), self.assertRaises(arvados.errors.ApiError) as exc_check:
+ client.users().current().execute()
+ response = exc_check.exception.args[0]
+ self.assertEqual(response.status, code)
+ self.assertEqual(response.get('status'), str(code))
+
+ def test_4xx_raised_after_retry_exhaustion(self):
+ client = arvados.api('v1', num_retries=1)
+ for code in self.RETRIED_4XX:
+ with self.subTest(f'failed {code}'), mock.patch('time.sleep'):
+ with mock_api_responses(
+ client,
+ b'{}',
+ [code, code, code, 200],
+ self.ERROR_HEADERS,
+ 'orig_http_request',
+ ), self.assertRaises(arvados.errors.ApiError) as exc_check:
+ client.users().current().execute()
+ response = exc_check.exception.args[0]
+ self.assertEqual(response.status, code)
+ self.assertEqual(response.get('status'), str(code))
+
def test_ordered_json_model(self):
mock_responses = {
'arvados.humans.get': (
@@ -139,8 +219,294 @@ class ArvadosApiTest(run_test_server.TestCaseWithServers):
result = api.humans().get(uuid='test').execute()
self.assertEqual(string.hexdigits, ''.join(list(result.keys())))
+ def test_api_is_threadsafe(self):
+ api_kwargs = {
+ 'host': os.environ['ARVADOS_API_HOST'],
+ 'token': os.environ['ARVADOS_API_TOKEN'],
+ 'insecure': True,
+ }
+ config_kwargs = {'apiconfig': os.environ}
+ for api_constructor, kwargs in [
+ (arvados.api, {}),
+ (arvados.api, api_kwargs),
+ (arvados.api_from_config, {}),
+ (arvados.api_from_config, config_kwargs),
+ ]:
+ sub_kwargs = "kwargs" if kwargs else "no kwargs"
+ with self.subTest(f"{api_constructor.__name__} with {sub_kwargs}"):
+ api_client = api_constructor('v1', **kwargs)
+ self.assertTrue(hasattr(api_client, 'localapi'),
+ f"client missing localapi method")
+ self.assertTrue(hasattr(api_client, 'keep'),
+ f"client missing keep attribute")
+
+ def test_api_host_constructor(self):
+ cache = True
+ insecure = True
+ client = arvados.api(
+ 'v1',
+ cache,
+ os.environ['ARVADOS_API_HOST'],
+ os.environ['ARVADOS_API_TOKEN'],
+ insecure,
+ )
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],
+ "client constructed with incorrect token")
+
+ def test_api_url_constructor(self):
+ client = arvados.api(
+ 'v1',
+ discoveryServiceUrl=self._discoveryServiceUrl(),
+ token=os.environ['ARVADOS_API_TOKEN'],
+ insecure=True,
+ )
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],
+ "client constructed with incorrect token")
+
+ def test_api_bad_args(self):
+ all_kwargs = {
+ 'host': os.environ['ARVADOS_API_HOST'],
+ 'token': os.environ['ARVADOS_API_TOKEN'],
+ 'discoveryServiceUrl': self._discoveryServiceUrl(),
+ }
+ for use_keys in [
+ # Passing only a single key is missing required info
+ *([key] for key in all_kwargs.keys()),
+ # Passing all keys is a conflict
+ list(all_kwargs.keys()),
+ ]:
+ kwargs = {key: all_kwargs[key] for key in use_keys}
+ kwargs_list = ', '.join(use_keys)
+ with self.subTest(f"calling arvados.api with {kwargs_list} fails"), \
+ self.assertRaises(ValueError):
+ arvados.api('v1', insecure=True, **kwargs)
+
+ def test_api_bad_url(self):
+ for bad_kwargs in [
+ {'discoveryServiceUrl': self._discoveryServiceUrl() + '/BadTestURL'},
+ {'version': 'BadTestVersion', 'host': os.environ['ARVADOS_API_HOST']},
+ ]:
+ bad_key = next(iter(bad_kwargs))
+ with self.subTest(f"api fails with bad {bad_key}"), \
+ self.assertRaises(apiclient_errors.UnknownApiNameOrVersion):
+ arvados.api(**bad_kwargs, token='test_api_bad_url', insecure=True)
+
+ def test_normalize_api_good_args(self):
+ for version, discoveryServiceUrl, host in [
+ ('Test1', None, os.environ['ARVADOS_API_HOST']),
+ (None, self._discoveryServiceUrl(), None)
+ ]:
+ argname = 'discoveryServiceUrl' if host is None else 'host'
+ with self.subTest(f"normalize_api_kwargs with {argname}"):
+ actual = normalize_api_kwargs(
+ version,
+ discoveryServiceUrl,
+ host,
+ os.environ['ARVADOS_API_TOKEN'],
+ insecure=True,
+ )
+ self.assertEqual(actual['discoveryServiceUrl'], self._discoveryServiceUrl())
+ self.assertEqual(actual['token'], os.environ['ARVADOS_API_TOKEN'])
+ self.assertEqual(actual['version'], version or 'v1')
+ self.assertTrue(actual['insecure'])
+ self.assertNotIn('host', actual)
+
+ def test_normalize_api_bad_args(self):
+ all_args = (
+ self._discoveryServiceUrl(),
+ os.environ['ARVADOS_API_HOST'],
+ os.environ['ARVADOS_API_TOKEN'],
+ )
+ for arg_index, arg_value in enumerate(all_args):
+ args = [None] * len(all_args)
+ args[arg_index] = arg_value
+ with self.subTest(f"normalize_api_kwargs with only arg #{arg_index + 1}"), \
+ self.assertRaises(ValueError):
+ normalize_api_kwargs('v1', *args)
+ with self.subTest("normalize_api_kwargs with discoveryServiceUrl and host"), \
+ self.assertRaises(ValueError):
+ normalize_api_kwargs('v1', *all_args)
+
+ def test_api_from_config_default(self):
+ client = arvados.api_from_config('v1')
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],
+ "client constructed with incorrect token")
+
+ def test_api_from_config_explicit(self):
+ config = self._config_from_environ()
+ client = arvados.api_from_config('v1', config)
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],
+ "client constructed with incorrect token")
+
+ def test_api_from_bad_config(self):
+ base_config = self._config_from_environ()
+ for del_key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
+ with self.subTest(f"api_from_config without {del_key} fails"), \
+ self.assertRaises(ValueError):
+ config = dict(base_config)
+ del config[del_key]
+ arvados.api_from_config('v1', config)
+
+ def test_api_kwargs_from_good_config(self):
+ for config in [None, self._config_from_environ()]:
+ conf_type = 'default' if config is None else 'passed'
+ with self.subTest(f"api_kwargs_from_config with {conf_type} config"):
+ version = 'Test1' if config else None
+ actual = api_kwargs_from_config(version, config)
+ self.assertEqual(actual['discoveryServiceUrl'], self._discoveryServiceUrl())
+ self.assertEqual(actual['token'], os.environ['ARVADOS_API_TOKEN'])
+ self.assertEqual(actual['version'], version or 'v1')
+ self.assertTrue(actual['insecure'])
+ self.assertNotIn('host', actual)
+
+ def test_api_kwargs_from_bad_config(self):
+ base_config = self._config_from_environ()
+ for del_key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
+ with self.subTest(f"api_kwargs_from_config without {del_key} fails"), \
+ self.assertRaises(ValueError):
+ config = dict(base_config)
+ del config[del_key]
+ api_kwargs_from_config('v1', config)
+
+ def test_api_client_constructor(self):
+ client = api_client(
+ 'v1',
+ self._discoveryServiceUrl(),
+ os.environ['ARVADOS_API_TOKEN'],
+ insecure=True,
+ )
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],
+ "client constructed with incorrect token")
+ self.assertFalse(
+ hasattr(client, 'localapi'),
+ "client has localapi method when it should not be thread-safe",
+ )
+
+ def test_api_client_bad_url(self):
+ all_args = ('v1', self._discoveryServiceUrl(), 'test_api_client_bad_url')
+ for arg_index, arg_value in [
+ (0, 'BadTestVersion'),
+ (1, all_args[1] + '/BadTestURL'),
+ ]:
+ with self.subTest(f"api_client fails with {arg_index}={arg_value!r}"), \
+ self.assertRaises(apiclient_errors.UnknownApiNameOrVersion):
+ args = list(all_args)
+ args[arg_index] = arg_value
+ api_client(*args, insecure=True)
+
+ def test_initial_retry_logs(self):
+ try:
+ _googleapiclient_log_lock.release()
+ except RuntimeError:
+ # Lock was never acquired - that's the state we want anyway
+ pass
+ real_logger = logging.getLogger('googleapiclient.http')
+ mock_logger = mock.Mock(wraps=real_logger)
+ mock_logger.handlers = logging.getLogger('googleapiclient').handlers
+ mock_logger.level = logging.NOTSET
+ with mock.patch('logging.getLogger', return_value=mock_logger), \
+ mock.patch('time.sleep'), \
+ self.assertLogs(real_logger, 'INFO') as actual_logs:
+ try:
+ api_client('v1', 'https://test.invalid/', 'NoToken', num_retries=1)
+ except httplib2.error.ServerNotFoundError:
+ pass
+ mock_logger.addFilter.assert_called()
+ mock_logger.addHandler.assert_called()
+ mock_logger.setLevel.assert_called()
+ mock_logger.removeHandler.assert_called()
+ mock_logger.removeFilter.assert_called()
+ self.assertRegex(actual_logs.output[0], r'^INFO:googleapiclient\.http:Sleeping \d')
+
+ def test_configured_logger_untouched(self):
+ real_logger = logging.getLogger('googleapiclient.http')
+ mock_logger = mock.Mock(wraps=real_logger)
+ mock_logger.handlers = logging.getLogger().handlers
+ with mock.patch('logging.getLogger', return_value=mock_logger), \
+ mock.patch('time.sleep'):
+ try:
+ api_client('v1', 'https://test.invalid/', 'NoToken', num_retries=1)
+ except httplib2.error.ServerNotFoundError:
+ pass
+ mock_logger.addFilter.assert_not_called()
+ mock_logger.addHandler.assert_not_called()
+ mock_logger.setLevel.assert_not_called()
+ mock_logger.removeHandler.assert_not_called()
+ mock_logger.removeFilter.assert_not_called()
-class RetryREST(unittest.TestCase):
+
+class ConstructNumRetriesTestCase(unittest.TestCase):
+ @staticmethod
+ def _fake_retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs):
+ return http.request(uri, method, *args, **kwargs)
+
+ @contextlib.contextmanager
+ def patch_retry(self):
+ # We have this dedicated context manager that goes through `sys.modules`
+ # instead of just using `mock.patch` because of the unfortunate
+ # `arvados.api` name collision.
+ orig_func = sys.modules['arvados.api']._orig_retry_request
+ expect_name = 'googleapiclient.http._retry_request'
+ self.assertEqual(
+ '{0.__module__}.{0.__name__}'.format(orig_func), expect_name,
+ f"test setup problem: {expect_name} not at arvados.api._orig_retry_request",
+ )
+ retry_mock = mock.Mock(wraps=self._fake_retry_request)
+ sys.modules['arvados.api']._orig_retry_request = retry_mock
+ try:
+ yield retry_mock
+ finally:
+ sys.modules['arvados.api']._orig_retry_request = orig_func
+
+ def _iter_num_retries(self, retry_mock):
+ for call in retry_mock.call_args_list:
+ try:
+ yield call.args[1]
+ except IndexError:
+ yield call.kwargs['num_retries']
+
+ def test_default_num_retries(self):
+ with self.patch_retry() as retry_mock:
+ client = arvados.api('v1')
+ actual = set(self._iter_num_retries(retry_mock))
+ self.assertEqual(len(actual), 1)
+ self.assertTrue(actual.pop() > 6, "num_retries lower than expected")
+
+ def _test_calls(self, init_arg, call_args, expected):
+ with self.patch_retry() as retry_mock:
+ client = arvados.api('v1', num_retries=init_arg)
+ for num_retries in call_args:
+ client.users().current().execute(num_retries=num_retries)
+ actual = self._iter_num_retries(retry_mock)
+ # The constructor makes two requests with its num_retries argument:
+ # one for the discovery document, and one for the config.
+ self.assertEqual(next(actual, None), init_arg)
+ self.assertEqual(next(actual, None), init_arg)
+ self.assertEqual(list(actual), expected)
+
+ def test_discovery_num_retries(self):
+ for num_retries in [0, 5, 55]:
+ with self.subTest(f"num_retries={num_retries}"):
+ self._test_calls(num_retries, [], [])
+
+ def test_num_retries_called_le_init(self):
+ for n in [6, 10]:
+ with self.subTest(f"init_arg={n}"):
+ call_args = [n - 4, n - 2, n]
+ expected = [n] * 3
+ self._test_calls(n, call_args, expected)
+
+ def test_num_retries_called_ge_init(self):
+ for n in [0, 10]:
+ with self.subTest(f"init_arg={n}"):
+ call_args = [n, n + 4, n + 8]
+ self._test_calls(n, call_args, call_args)
+
+ def test_num_retries_called_mixed(self):
+ self._test_calls(5, [2, 6, 4, 8], [5, 6, 5, 8])
+
+
+class PreCloseSocketTestCase(unittest.TestCase):
def setUp(self):
self.api = arvados.api('v1')
self.assertTrue(hasattr(self.api._http, 'orig_http_request'),
@@ -152,59 +518,6 @@ class RetryREST(unittest.TestCase):
# All requests succeed by default. Tests override as needed.
self.api._http.orig_http_request.return_value = self.request_success
- @mock.patch('time.sleep')
- def test_socket_error_retry_get(self, sleep):
- self.api._http.orig_http_request.side_effect = (
- socket.error('mock error'),
- self.request_success,
- )
- self.assertEqual(self.api.users().current().execute(),
- self.mock_response)
- self.assertGreater(self.api._http.orig_http_request.call_count, 1,
- "client got the right response without retrying")
- self.assertEqual(sleep.call_args_list,
- [mock.call(RETRY_DELAY_INITIAL)])
-
- @mock.patch('time.sleep')
- def test_same_automatic_request_id_on_retry(self, sleep):
- self.api._http.orig_http_request.side_effect = (
- socket.error('mock error'),
- self.request_success,
- )
- self.api.users().current().execute()
- calls = self.api._http.orig_http_request.call_args_list
- self.assertEqual(len(calls), 2)
- self.assertEqual(
- calls[0][1]['headers']['X-Request-Id'],
- calls[1][1]['headers']['X-Request-Id'])
- self.assertRegex(calls[0][1]['headers']['X-Request-Id'], r'^req-[a-z0-9]{20}$')
-
- @mock.patch('time.sleep')
- def test_provided_request_id_on_retry(self, sleep):
- self.api.request_id='fake-request-id'
- self.api._http.orig_http_request.side_effect = (
- socket.error('mock error'),
- self.request_success,
- )
- self.api.users().current().execute()
- calls = self.api._http.orig_http_request.call_args_list
- self.assertEqual(len(calls), 2)
- for call in calls:
- self.assertEqual(call[1]['headers']['X-Request-Id'], 'fake-request-id')
-
- @mock.patch('time.sleep')
- def test_socket_error_retry_delay(self, sleep):
- self.api._http.orig_http_request.side_effect = socket.error('mock')
- self.api._http._retry_count = 3
- with self.assertRaises(socket.error):
- self.api.users().current().execute()
- self.assertEqual(self.api._http.orig_http_request.call_count, 4)
- self.assertEqual(sleep.call_args_list, [
- mock.call(RETRY_DELAY_INITIAL),
- mock.call(RETRY_DELAY_INITIAL * RETRY_DELAY_BACKOFF),
- mock.call(RETRY_DELAY_INITIAL * RETRY_DELAY_BACKOFF**2),
- ])
-
@mock.patch('time.time', side_effect=[i*2**20 for i in range(99)])
def test_close_old_connections_non_retryable(self, sleep):
self._test_connection_close(expect=1)
@@ -228,18 +541,6 @@ class RetryREST(unittest.TestCase):
for c in mock_conns.values():
self.assertEqual(c.close.call_count, expect)
- @mock.patch('time.sleep')
- def test_socket_error_no_retry_post(self, sleep):
- self.api._http.orig_http_request.side_effect = (
- socket.error('mock error'),
- self.request_success,
- )
- with self.assertRaises(socket.error):
- self.api.users().create(body={}).execute()
- self.assertEqual(self.api._http.orig_http_request.call_count, 1,
- "client should try non-retryable method exactly once")
- self.assertEqual(sleep.call_args_list, [])
-
if __name__ == '__main__':
unittest.main()
diff --git a/sdk/python/tests/test_arv_get.py b/sdk/python/tests/test_arv_get.py
index 73ef2475b9..b66039dfe3 100644
--- a/sdk/python/tests/test_arv_get.py
+++ b/sdk/python/tests/test_arv_get.py
@@ -6,12 +6,13 @@ from __future__ import absolute_import
from future.utils import listitems
import io
import logging
-import mock
import os
import re
import shutil
import tempfile
+from unittest import mock
+
import arvados
import arvados.collection as collection
import arvados.commands.get as arv_get
@@ -88,7 +89,7 @@ class ArvadosGetTestCase(run_test_server.TestCaseWithServers,
def test_get_block(self):
# Get raw data using a block locator
- blk = re.search(' (acbd18\S+\+A\S+) ', self.col_manifest).group(1)
+ blk = re.search(r' (acbd18\S+\+A\S+) ', self.col_manifest).group(1)
r = self.run_get([blk, '-'])
self.assertEqual(0, r)
self.assertEqual(b'foo', self.stdout.getvalue())
diff --git a/sdk/python/tests/test_arv_keepdocker.py b/sdk/python/tests/test_arv_keepdocker.py
index 8fbfad4377..5d23dfb378 100644
--- a/sdk/python/tests/test_arv_keepdocker.py
+++ b/sdk/python/tests/test_arv_keepdocker.py
@@ -2,35 +2,37 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import absolute_import
import arvados
import collections
+import collections.abc
import copy
import hashlib
-import mock
+import logging
import os
import subprocess
import sys
import tempfile
import unittest
-import logging
+
+from pathlib import Path
+from unittest import mock
+
+import parameterized
import arvados.commands.keepdocker as arv_keepdocker
from . import arvados_testutil as tutil
-from . import run_test_server
-
class StopTest(Exception):
pass
class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
- def run_arv_keepdocker(self, args, err):
+ def run_arv_keepdocker(self, args, err, **kwargs):
sys.argv = ['arv-keepdocker'] + args
log_handler = logging.StreamHandler(err)
arv_keepdocker.logger.addHandler(log_handler)
try:
- return arv_keepdocker.main()
+ return arv_keepdocker.main(**kwargs)
finally:
arv_keepdocker.logger.removeHandler(log_handler)
@@ -135,12 +137,19 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
self.run_arv_keepdocker(['repo:tag'], sys.stderr)
find_image_mock.assert_called_with('repo', 'tag')
+ def test_image_given_as_registry_repo_colon_tag(self):
with self.assertRaises(StopTest), \
mock.patch('arvados.commands.keepdocker.find_one_image_hash',
side_effect=StopTest) as find_image_mock:
self.run_arv_keepdocker(['myreg.example:8888/repo/img:tag'], sys.stderr)
find_image_mock.assert_called_with('myreg.example:8888/repo/img', 'tag')
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['registry.hub.docker.com:443/library/debian:bullseye-slim'], sys.stderr)
+ find_image_mock.assert_called_with('registry.hub.docker.com/library/debian', 'bullseye-slim')
+
def test_image_has_colons(self):
with self.assertRaises(StopTest), \
mock.patch('arvados.commands.keepdocker.find_one_image_hash',
@@ -154,6 +163,27 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
self.run_arv_keepdocker(['[::1]/repo/img'], sys.stderr)
find_image_mock.assert_called_with('[::1]/repo/img', 'latest')
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['[::1]:8888/repo/img:tag'], sys.stderr)
+ find_image_mock.assert_called_with('[::1]:8888/repo/img', 'tag')
+
+ def test_list_images_with_host_and_port(self):
+ api = arvados.api('v1')
+ taglink = api.links().create(body={'link': {
+ 'link_class': 'docker_image_repo+tag',
+ 'name': 'registry.example:1234/repo:latest',
+ 'head_uuid': 'zzzzz-4zz18-1v45jub259sjjgb',
+ }}).execute()
+ try:
+ out = tutil.StringIO()
+ with self.assertRaises(SystemExit):
+ self.run_arv_keepdocker([], sys.stderr, stdout=out)
+ self.assertRegex(out.getvalue(), '\nregistry.example:1234/repo +latest ')
+ finally:
+ api.links().delete(uuid=taglink['uuid']).execute()
+
@mock.patch('arvados.commands.keepdocker.list_images_in_arv',
return_value=[])
@mock.patch('arvados.commands.keepdocker.find_image_hashes',
@@ -198,3 +228,30 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
api().collections().update.assert_called_with(
uuid=mocked_collection['uuid'],
body={'properties': updated_properties})
+
+
+@parameterized.parameterized_class(('filename',), [
+ ('hello-world-ManifestV2.tar',),
+ ('hello-world-ManifestV2-OCILayout.tar',),
+])
+class ImageMetadataTestCase(unittest.TestCase):
+ DATA_PATH = Path(__file__).parent / 'data'
+
+ @classmethod
+ def setUpClass(cls):
+ cls.image_file = (cls.DATA_PATH / cls.filename).open('rb')
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.image_file.close()
+
+ def setUp(self):
+ self.manifest, self.config = arv_keepdocker.load_image_metadata(self.image_file)
+
+ def test_image_manifest(self):
+ self.assertIsInstance(self.manifest, collections.abc.Mapping)
+ self.assertEqual(self.manifest.get('RepoTags'), ['hello-world:latest'])
+
+ def test_image_config(self):
+ self.assertIsInstance(self.config, collections.abc.Mapping)
+ self.assertEqual(self.config.get('created'), '2023-05-02T16:49:27Z')
diff --git a/sdk/python/tests/test_arv_ls.py b/sdk/python/tests/test_arv_ls.py
index 635c6254ad..d48b94ffac 100644
--- a/sdk/python/tests/test_arv_ls.py
+++ b/sdk/python/tests/test_arv_ls.py
@@ -8,9 +8,10 @@ from builtins import range
import os
import random
import sys
-import mock
import tempfile
+from unittest import mock
+
import arvados.errors as arv_error
import arvados.commands.ls as arv_ls
from . import run_test_server
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
index 0e531dee31..772a4f6b3e 100644
--- a/sdk/python/tests/test_arv_put.py
+++ b/sdk/python/tests/test_arv_put.py
@@ -16,7 +16,6 @@ import ciso8601
import datetime
import json
import logging
-import mock
import multiprocessing
import os
import pwd
@@ -32,6 +31,8 @@ import time
import unittest
import uuid
+from unittest import mock
+
import arvados
import arvados.commands.put as arv_put
from . import arvados_testutil as tutil
@@ -813,6 +814,7 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers,
def test_put_block_replication(self):
self.call_main_on_test_file()
+ arv_put.api_client = None
with mock.patch('arvados.collection.KeepClient.local_store_put') as put_mock:
put_mock.return_value = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
self.call_main_on_test_file(['--replication', '1'])
diff --git a/sdk/python/tests/test_arvfile.py b/sdk/python/tests/test_arvfile.py
index b45a592ecd..b98e1e97d4 100644
--- a/sdk/python/tests/test_arvfile.py
+++ b/sdk/python/tests/test_arvfile.py
@@ -8,11 +8,12 @@ from builtins import str
from builtins import range
from builtins import object
import datetime
-import mock
import os
import unittest
import time
+from unittest import mock
+
import arvados
from arvados._ranges import Range
from arvados.keep import KeepLocator
@@ -27,6 +28,7 @@ class ArvadosFileWriterTestCase(unittest.TestCase):
def __init__(self, blocks):
self.blocks = blocks
self.requests = []
+ self.num_prefetch_threads = 1
def get(self, locator, num_retries=0, prefetch=False):
self.requests.append(locator)
return self.blocks.get(locator)
@@ -37,6 +39,8 @@ class ArvadosFileWriterTestCase(unittest.TestCase):
pdh = tutil.str_keep_locator(data)
self.blocks[pdh] = bytes(data)
return pdh
+ def block_prefetch(self, loc):
+ self.requests.append(loc)
class MockApi(object):
def __init__(self, b, r):
@@ -414,7 +418,7 @@ class ArvadosFileWriterTestCase(unittest.TestCase):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({}, {})
for r in [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [3, 2, 0, 4, 1]]:
- with Collection() as c:
+ with Collection(api_client=api, keep_client=keep) as c:
writer = c.open("count.txt", "rb+")
self.assertEqual(writer.size(), 0)
@@ -429,7 +433,7 @@ class ArvadosFileWriterTestCase(unittest.TestCase):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({}, {})
for r in [[0, 1, 2, 4], [4, 2, 1, 0], [2, 0, 4, 1]]:
- with Collection() as c:
+ with Collection(api_client=api, keep_client=keep) as c:
writer = c.open("count.txt", "rb+")
self.assertEqual(writer.size(), 0)
@@ -627,7 +631,8 @@ class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
def __init__(self, blocks, nocache):
self.blocks = blocks
self.nocache = nocache
- self.num_get_threads = 1
+ self._keep = ArvadosFileWriterTestCase.MockKeep({})
+ self.prefetch_lookahead = 0
def block_prefetch(self, loc):
pass
@@ -689,8 +694,60 @@ class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
r = c.open("count.txt", "rb")
self.assertEqual(b"0123", r.read(4))
- self.assertIn("2e9ec317e197819358fbc43afca7d837+8", keep.requests)
- self.assertIn("e8dc4081b13434b45189a720b77b6818+8", keep.requests)
+ self.assertEqual(["2e9ec317e197819358fbc43afca7d837+8",
+ "e8dc4081b13434b45189a720b77b6818+8"], keep.requests)
+
+ def test_prefetch_disabled(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({
+ "2e9ec317e197819358fbc43afca7d837+8": b"01234567",
+ "e8dc4081b13434b45189a720b77b6818+8": b"abcdefgh",
+ })
+ keep.num_prefetch_threads = 0
+ with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+ r = c.open("count.txt", "rb")
+ self.assertEqual(b"0123", r.read(4))
+
+ self.assertEqual(["2e9ec317e197819358fbc43afca7d837+8"], keep.requests)
+
+ def test_prefetch_first_read_only(self):
+ # test behavior that prefetch only happens every 128 reads
+ # check that it doesn't make a prefetch request on the second read
+ keep = ArvadosFileWriterTestCase.MockKeep({
+ "2e9ec317e197819358fbc43afca7d837+8": b"01234567",
+ "e8dc4081b13434b45189a720b77b6818+8": b"abcdefgh",
+ })
+ with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+ r = c.open("count.txt", "rb")
+ self.assertEqual(b"0123", r.read(4))
+ self.assertEqual(b"45", r.read(2))
+ self.assertEqual(["2e9ec317e197819358fbc43afca7d837+8",
+ "e8dc4081b13434b45189a720b77b6818+8",
+ "2e9ec317e197819358fbc43afca7d837+8"], keep.requests)
+ self.assertEqual(3, len(keep.requests))
+
+ def test_prefetch_again(self):
+ # test behavior that prefetch only happens every 128 reads
+ # check that it does make another prefetch request after 128 reads
+ keep = ArvadosFileWriterTestCase.MockKeep({
+ "2e9ec317e197819358fbc43afca7d837+8": b"01234567",
+ "e8dc4081b13434b45189a720b77b6818+8": b"abcdefgh",
+ })
+ with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+ r = c.open("count.txt", "rb")
+ for i in range(0, 129):
+ r.seek(0)
+ self.assertEqual(b"0123", r.read(4))
+ self.assertEqual(["2e9ec317e197819358fbc43afca7d837+8",
+ "e8dc4081b13434b45189a720b77b6818+8",
+ "2e9ec317e197819358fbc43afca7d837+8",
+ "2e9ec317e197819358fbc43afca7d837+8"], keep.requests[0:4])
+ self.assertEqual(["2e9ec317e197819358fbc43afca7d837+8",
+ "2e9ec317e197819358fbc43afca7d837+8",
+ "2e9ec317e197819358fbc43afca7d837+8",
+ "e8dc4081b13434b45189a720b77b6818+8"], keep.requests[127:131])
+ # gets the 1st block 129 times from keep (cache),
+ # and the 2nd block twice to get 131 requests
+ self.assertEqual(131, len(keep.requests))
def test__eq__from_manifest(self):
with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
diff --git a/sdk/python/tests/test_cache.py b/sdk/python/tests/test_cache.py
index 259acd0a30..d86c7337e1 100644
--- a/sdk/python/tests/test_cache.py
+++ b/sdk/python/tests/test_cache.py
@@ -8,7 +8,6 @@ from __future__ import absolute_import
from builtins import str
from builtins import range
import hashlib
-import mock
import os
import random
import shutil
@@ -17,11 +16,12 @@ import tempfile
import threading
import unittest
+from unittest import mock
+
import arvados
import arvados.cache
from . import run_test_server
-
def _random(n):
return bytearray(random.getrandbits(8) for _ in range(n))
diff --git a/sdk/python/tests/test_cmd_util.py b/sdk/python/tests/test_cmd_util.py
new file mode 100644
index 0000000000..ffd45aa4b7
--- /dev/null
+++ b/sdk/python/tests/test_cmd_util.py
@@ -0,0 +1,194 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import contextlib
+import copy
+import itertools
+import json
+import os
+import tempfile
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+import arvados.commands._util as cmd_util
+
+FILE_PATH = Path(__file__)
+
+class ValidateFiltersTestCase(unittest.TestCase):
+ NON_FIELD_TYPES = [
+ None,
+ 123,
+ ('name', '=', 'tuple'),
+ {'filters': ['name', '=', 'object']},
+ ]
+ NON_FILTER_TYPES = NON_FIELD_TYPES + ['string']
+ VALID_FILTERS = [
+ ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890'],
+ ['name', 'in', ['foo', 'bar']],
+        '(replication_desired > replication_confirmed)',
+ '(replication_confirmed>=replication_desired)',
+ ]
+
+ @parameterized.expand(itertools.combinations(VALID_FILTERS, 2))
+ def test_valid_filters(self, f1, f2):
+ expected = [f1, f2]
+ actual = cmd_util.validate_filters(copy.deepcopy(expected))
+ self.assertEqual(actual, expected)
+
+ @parameterized.expand([(t,) for t in NON_FILTER_TYPES])
+ def test_filters_wrong_type(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filters are not a list\b'):
+ cmd_util.validate_filters(value)
+
+ @parameterized.expand([(t,) for t in NON_FIELD_TYPES])
+ def test_single_filter_wrong_type(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 is not a string or list\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand([
+ ([],),
+ (['owner_uuid'],),
+ (['owner_uuid', 'zzzzz-tpzed-12345abcde67890'],),
+ (['name', 'not in', 'foo', 'bar'],),
+ (['name', 'in', 'foo', 'bar', 'baz'],),
+ ])
+ def test_filters_wrong_arity(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 does not have three items\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand(itertools.product(
+ [0, 1],
+ NON_FIELD_TYPES,
+ ))
+ def test_filter_definition_wrong_type(self, index, bad_value):
+ value = ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890']
+ value[index] = bad_value
+ name = ('field name', 'operator')[index]
+ with self.assertRaisesRegex(ValueError, rf'^filter at index 0 {name} is not a string\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand([
+ # Not enclosed in parentheses
+ 'foo = bar',
+ '(foo) < bar',
+ 'foo > (bar)',
+ # Not exactly one operator
+ '(a >= b >= c)',
+ '(foo)',
+ '(file_count version)',
+ # Invalid field identifiers
+ '(version = 1)',
+ '(2 = file_count)',
+ '(replication.desired <= replication.confirmed)',
+ # Invalid whitespace
+ '(file_count\t=\tversion)',
+ '(file_count >= version\n)',
+ ])
+ def test_invalid_string_filter(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 has invalid syntax\b'):
+ cmd_util.validate_filters([value])
+
+
+class JSONArgumentTestCase(unittest.TestCase):
+ JSON_OBJECTS = [
+ None,
+ 123,
+ 456.789,
+ 'string',
+ ['list', 1],
+ {'object': True, 'yaml': False},
+ ]
+
+ @classmethod
+ def setUpClass(cls):
+ cls.json_file = tempfile.NamedTemporaryFile(
+ 'w+',
+ encoding='utf-8',
+ prefix='argtest',
+ suffix='.json',
+ )
+ cls.parser = cmd_util.JSONArgument()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.json_file.close()
+
+ def setUp(self):
+ self.json_file.seek(0)
+ self.json_file.truncate()
+
+ @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+ def test_valid_argument_string(self, obj):
+ actual = self.parser(json.dumps(obj))
+ self.assertEqual(actual, obj)
+
+ @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+ def test_valid_argument_path(self, obj):
+ json.dump(obj, self.json_file)
+ self.json_file.flush()
+ actual = self.parser(self.json_file.name)
+ self.assertEqual(actual, obj)
+
+ @parameterized.expand([
+ '',
+ '\0',
+ None,
+ ])
+ def test_argument_not_json_or_path(self, value):
+ if value is None:
+ with tempfile.NamedTemporaryFile() as gone_file:
+ value = gone_file.name
+ with self.assertRaisesRegex(ValueError, r'\bnot a valid JSON string or file path\b'):
+ self.parser(value)
+
+ @parameterized.expand([
+ FILE_PATH.parent,
+ FILE_PATH / 'nonexistent.json',
+ None,
+ ])
+ def test_argument_path_unreadable(self, path):
+ if path is None:
+ bad_file = tempfile.NamedTemporaryFile()
+ os.chmod(bad_file.fileno(), 0o000)
+ path = bad_file.name
+ @contextlib.contextmanager
+ def ctx():
+ try:
+ yield
+ finally:
+ os.chmod(bad_file.fileno(), 0o600)
+ else:
+ ctx = contextlib.nullcontext
+ with self.assertRaisesRegex(ValueError, rf'^error reading JSON file path {str(path)!r}: '), ctx():
+ self.parser(str(path))
+
+ @parameterized.expand([
+ FILE_PATH,
+ None,
+ ])
+ def test_argument_path_not_json(self, path):
+ if path is None:
+ path = self.json_file.name
+ with self.assertRaisesRegex(ValueError, rf'^error decoding JSON from file {str(path)!r}'):
+ self.parser(str(path))
+
+
+class JSONArgumentValidationTestCase(unittest.TestCase):
+ @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+ def test_object_returned_from_validator(self, value):
+ parser = cmd_util.JSONArgument(lambda _: copy.deepcopy(value))
+ self.assertEqual(parser('{}'), value)
+
+ @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+ def test_exception_raised_from_validator(self, value):
+ json_value = json.dumps(value)
+ def raise_func(_):
+ raise ValueError(json_value)
+ parser = cmd_util.JSONArgument(raise_func)
+ with self.assertRaises(ValueError) as exc_check:
+ parser(json_value)
+ self.assertEqual(exc_check.exception.args, (json_value,))
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
index b4849c21ff..65b89056bb 100644
--- a/sdk/python/tests/test_collections.py
+++ b/sdk/python/tests/test_collections.py
@@ -7,7 +7,6 @@ from __future__ import absolute_import
from builtins import object
import arvados
import copy
-import mock
import os
import random
import re
@@ -16,11 +15,15 @@ import datetime
import ciso8601
import time
import unittest
+import parameterized
+
+from unittest import mock
from . import run_test_server
from arvados._ranges import Range, LocatorAndRange
from arvados.collection import Collection, CollectionReader
from . import arvados_testutil as tutil
+from .arvados_testutil import make_block_cache
class TestResumableWriter(arvados.ResumableCollectionWriter):
KEEP_BLOCK_SIZE = 1024 # PUT to Keep every 1K.
@@ -28,9 +31,10 @@ class TestResumableWriter(arvados.ResumableCollectionWriter):
def current_state(self):
return self.dump_state(copy.deepcopy)
-
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
tutil.ArvadosBaseTestCase):
+ disk_cache = False
MAIN_SERVER = {}
@classmethod
@@ -40,7 +44,8 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
run_test_server.authorize_with('admin')
cls.api_client = arvados.api('v1')
cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
- local_store=cls.local_store)
+ local_store=cls.local_store,
+ block_cache=make_block_cache(cls.disk_cache))
def write_foo_bar_baz(self):
cw = arvados.CollectionWriter(self.api_client)
@@ -319,6 +324,7 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
class MockKeep(object):
def __init__(self, content, num_retries=0):
self.content = content
+ self.num_prefetch_threads = 1
def get(self, locator, num_retries=0, prefetch=False):
return self.content[locator]
@@ -534,11 +540,11 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
self.mock_get_collection(client, status, 'foo_file')
return client
- def test_init_no_default_retries(self):
+ def test_init_default_retries(self):
client = self.api_client_mock(200)
reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
reader.manifest_text()
- client.collections().get().execute.assert_called_with(num_retries=0)
+ client.collections().get().execute.assert_called_with(num_retries=10)
def test_uuid_init_success(self):
client = self.api_client_mock(200)
@@ -588,7 +594,7 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
# Ensure stripped_manifest() doesn't mangle our manifest in
# any way other than stripping hints.
self.assertEqual(
- re.sub('\+[^\d\s\+]+', '', nonnormal),
+ re.sub(r'\+[^\d\s\+]+', '', nonnormal),
reader.stripped_manifest())
# Ensure stripped_manifest() didn't mutate our reader.
self.assertEqual(nonnormal, reader.manifest_text())
@@ -896,7 +902,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
c1.save_new()
loc = c1.manifest_locator()
c2 = Collection(loc)
- self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))
self.assertEqual(c1.replication_desired, c2.replication_desired)
def test_replication_desired_not_loaded_if_provided(self):
@@ -905,7 +911,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
c1.save_new()
loc = c1.manifest_locator()
c2 = Collection(loc, replication_desired=2)
- self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))
self.assertNotEqual(c1.replication_desired, c2.replication_desired)
def test_storage_classes_desired_kept_on_load(self):
@@ -914,7 +920,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
c1.save_new()
loc = c1.manifest_locator()
c2 = Collection(loc)
- self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))
self.assertEqual(c1.storage_classes_desired(), c2.storage_classes_desired())
def test_storage_classes_change_after_save(self):
@@ -927,7 +933,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
c2.save(storage_classes=['highIO'])
self.assertEqual(['highIO'], c2.storage_classes_desired())
c3 = Collection(loc)
- self.assertEqual(c1.manifest_text, c3.manifest_text)
+ self.assertEqual(c1.manifest_text(strip=True), c3.manifest_text(strip=True))
self.assertEqual(['highIO'], c3.storage_classes_desired())
def test_storage_classes_desired_not_loaded_if_provided(self):
@@ -936,7 +942,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
c1.save_new()
loc = c1.manifest_locator()
c2 = Collection(loc, storage_classes_desired=['default'])
- self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))
self.assertNotEqual(c1.storage_classes_desired(), c2.storage_classes_desired())
def test_init_manifest(self):
diff --git a/sdk/python/tests/test_events.py b/sdk/python/tests/test_events.py
index f5192160f3..a3a3267be7 100644
--- a/sdk/python/tests/test_events.py
+++ b/sdk/python/tests/test_events.py
@@ -2,25 +2,72 @@
#
# SPDX-License-Identifier: Apache-2.0
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import range
-from builtins import object
+import json
import logging
-import mock
import queue
import sys
import threading
import time
import unittest
+from unittest import mock
+
+import websockets.exceptions as ws_exc
+
import arvados
from . import arvados_testutil as tutil
from . import run_test_server
+class FakeWebsocketClient:
+ """Fake self-contained version of websockets.sync.client.ClientConnection
+
+ This provides enough of the API to test EventClient. It loosely mimics
+ the Arvados WebSocket API by acknowledging subscribe messages. You can use
+ `mock_wrapper` to test calls. You can set `_check_lock` to test that the
+ given lock is acquired before `send` is called.
+ """
+
+ def __init__(self):
+ self._check_lock = None
+ self._closed = threading.Event()
+ self._messages = queue.Queue()
+
+ def mock_wrapper(self):
+ wrapper = mock.Mock(wraps=self)
+ wrapper.__iter__ = lambda _: self.__iter__()
+ return wrapper
+
+ def __iter__(self):
+ while True:
+ msg = self._messages.get()
+ self._messages.task_done()
+ if isinstance(msg, Exception):
+ raise msg
+ else:
+ yield msg
+
+ def close(self, code=1000, reason=''):
+ if not self._closed.is_set():
+ self._closed.set()
+ self.force_disconnect()
+
+ def force_disconnect(self):
+ self._messages.put(ws_exc.ConnectionClosed(None, None))
+
+ def send(self, msg):
+ if self._check_lock is not None and self._check_lock.acquire(blocking=False):
+ self._check_lock.release()
+ raise AssertionError(f"called ws_client.send() without lock")
+ elif self._closed.is_set():
+ raise ws_exc.ConnectionClosed(None, None)
+ try:
+ msg = json.loads(msg)
+ except ValueError:
+ status = 400
+ else:
+ status = 200
+ self._messages.put(json.dumps({'status': status}))
+
class WebsocketTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
@@ -201,7 +248,7 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
# close (im)properly
if close_unexpected:
- self.ws.ec.close_connection()
+ self.ws._client.close()
else:
self.ws.close()
@@ -240,69 +287,115 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
self._test_websocket_reconnect(False)
# Test websocket reconnection retry
- @mock.patch('arvados.events._EventClient.connect')
- def test_websocket_reconnect_retry(self, event_client_connect):
- event_client_connect.side_effect = [None, Exception('EventClient.connect error'), None]
-
+ @mock.patch('arvados.events.ws_client.connect')
+ def test_websocket_reconnect_retry(self, ws_conn):
logstream = tutil.StringIO()
rootLogger = logging.getLogger()
streamHandler = logging.StreamHandler(logstream)
rootLogger.addHandler(streamHandler)
-
- run_test_server.authorize_with('active')
- events = queue.Queue(100)
-
- filters = [['object_uuid', 'is_a', 'arvados#human']]
- self.ws = arvados.events.subscribe(
- arvados.api('v1'), filters,
- events.put_nowait,
- poll_fallback=False,
- last_log_id=None)
- self.assertIsInstance(self.ws, arvados.events.EventClient)
-
- # simulate improper close
- self.ws.on_closed()
-
- # verify log messages to ensure retry happened
- log_messages = logstream.getvalue()
- found = log_messages.find("Error 'EventClient.connect error' during websocket reconnect.")
- self.assertNotEqual(found, -1)
- rootLogger.removeHandler(streamHandler)
-
- @mock.patch('arvados.events._EventClient')
- def test_subscribe_method(self, websocket_client):
- filters = [['object_uuid', 'is_a', 'arvados#human']]
- client = arvados.events.EventClient(
- self.MOCK_WS_URL, [], lambda event: None, None)
- client.subscribe(filters[:], 99)
- websocket_client().subscribe.assert_called_with(filters, 99)
-
- @mock.patch('arvados.events._EventClient')
- def test_unsubscribe(self, websocket_client):
- filters = [['object_uuid', 'is_a', 'arvados#human']]
- client = arvados.events.EventClient(
- self.MOCK_WS_URL, filters[:], lambda event: None, None)
- client.unsubscribe(filters[:])
- websocket_client().unsubscribe.assert_called_with(filters)
-
- @mock.patch('arvados.events._EventClient')
+ try:
+ msg_event, wss_client, self.ws = self.fake_client(ws_conn)
+ self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for setup callback")
+ msg_event.clear()
+ ws_conn.side_effect = [Exception('EventClient.connect error'), wss_client]
+ wss_client.force_disconnect()
+ self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for reconnect callback")
+ # verify log messages to ensure retry happened
+ self.assertIn("Error 'EventClient.connect error' during websocket reconnect.", logstream.getvalue())
+ self.assertEqual(ws_conn.call_count, 3)
+ finally:
+ rootLogger.removeHandler(streamHandler)
+
+ @mock.patch('arvados.events.ws_client.connect')
def test_run_forever_survives_reconnects(self, websocket_client):
- connected = threading.Event()
- websocket_client().connect.side_effect = connected.set
client = arvados.events.EventClient(
self.MOCK_WS_URL, [], lambda event: None, None)
forever_thread = threading.Thread(target=client.run_forever)
forever_thread.start()
# Simulate an unexpected disconnect, and wait for reconnect.
- close_thread = threading.Thread(target=client.on_closed)
- close_thread.start()
- self.assertTrue(connected.wait(timeout=self.TEST_TIMEOUT))
- close_thread.join()
- run_forever_alive = forever_thread.is_alive()
- client.close()
- forever_thread.join()
- self.assertTrue(run_forever_alive)
- self.assertEqual(2, websocket_client().connect.call_count)
+ try:
+ client.on_closed()
+ self.assertTrue(forever_thread.is_alive())
+ self.assertEqual(2, websocket_client.call_count)
+ finally:
+ client.close()
+ forever_thread.join()
+
+    @staticmethod
+    def fake_client(conn_patch, filters=None, url=MOCK_WS_URL):
+        """Set up EventClient test infrastructure
+
+        Given a patch of `arvados.events.ws_client.connect`,
+        this returns a 3-tuple:
+
+        * `msg_event` is a `threading.Event` that is set as the test client
+          event callback. You can wait for this event to confirm that a
+          sent message has been acknowledged and processed.
+
+        * `mock_client` is a `mock.Mock` wrapper around `FakeWebsocketClient`.
+          Use this to assert `EventClient` calls the right methods. It tests
+          that `EventClient` acquires a lock before calling `send`.
+
+        * `client` is the `EventClient` that uses `mock_client` under the hood
+          that you exercise methods of.
+
+        Other arguments are passed to initialize `EventClient`.
+        """
+        msg_event = threading.Event()
+        fake_client = FakeWebsocketClient()
+        mock_client = fake_client.mock_wrapper()
+        conn_patch.return_value = mock_client
+        # NOTE(review): assumes EventClient's last_log_id parameter has a
+        # default — other call sites in this file pass it explicitly; confirm.
+        client = arvados.events.EventClient(url, filters, lambda _: msg_event.set())
+        # Hand the client's subscribe lock to the fake so its send() can
+        # assert the lock is held while sending.
+        fake_client._check_lock = client._subscribe_lock
+        return msg_event, mock_client, client
+
+    @mock.patch('arvados.events.ws_client.connect')
+    def test_subscribe_locking(self, ws_conn):
+        """subscribe() should send one JSON subscribe message under the lock."""
+        f = [['created_at', '>=', '2023-12-01T00:00:00.000Z']]
+        msg_event, wss_client, self.ws = self.fake_client(ws_conn)
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for setup callback")
+        msg_event.clear()
+        # Drop the initial subscribe call so only the explicit one is checked.
+        wss_client.send.reset_mock()
+        self.ws.subscribe(f)
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for subscribe callback")
+        wss_client.send.assert_called()
+        (msg,), _ = wss_client.send.call_args
+        self.assertEqual(
+            json.loads(msg),
+            {'method': 'subscribe', 'filters': f},
+        )
+
+    @mock.patch('arvados.events.ws_client.connect')
+    def test_unsubscribe_locking(self, ws_conn):
+        """unsubscribe() should send one JSON unsubscribe message under the lock."""
+        f = [['created_at', '>=', '2023-12-01T01:00:00.000Z']]
+        msg_event, wss_client, self.ws = self.fake_client(ws_conn, f)
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for setup callback")
+        msg_event.clear()
+        # Drop the initial subscribe call so only the unsubscribe is checked.
+        wss_client.send.reset_mock()
+        self.ws.unsubscribe(f)
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for unsubscribe callback")
+        wss_client.send.assert_called()
+        (msg,), _ = wss_client.send.call_args
+        self.assertEqual(
+            json.loads(msg),
+            {'method': 'unsubscribe', 'filters': f},
+        )
+
+    @mock.patch('arvados.events.ws_client.connect')
+    def test_resubscribe_locking(self, ws_conn):
+        """After an unexpected disconnect, the client should resubscribe with its filters."""
+        f = [['created_at', '>=', '2023-12-01T02:00:00.000Z']]
+        msg_event, wss_client, self.ws = self.fake_client(ws_conn, f)
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for setup callback")
+        msg_event.clear()
+        wss_client.send.reset_mock()
+        # Simulate the connection dropping; the client should reconnect and
+        # re-send its subscribe message automatically.
+        wss_client.force_disconnect()
+        self.assertTrue(msg_event.wait(timeout=1), "timed out waiting for resubscribe callback")
+        wss_client.send.assert_called()
+        (msg,), _ = wss_client.send.call_args
+        self.assertEqual(
+            json.loads(msg),
+            {'method': 'subscribe', 'filters': f},
+        )
class PollClientTestCase(unittest.TestCase):
diff --git a/sdk/python/tests/test_http.py b/sdk/python/tests/test_http.py
new file mode 100644
index 0000000000..de63719453
--- /dev/null
+++ b/sdk/python/tests/test_http.py
@@ -0,0 +1,496 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+
+import copy
+import io
+import functools
+import hashlib
+import json
+import logging
+import sys
+import unittest
+import datetime
+
+from unittest import mock
+
+import arvados
+import arvados.collection
+import arvados.keep
+import pycurl
+
+from arvados.http_to_keep import http_to_keep
+
+# Turns out there was already "FakeCurl" that serves the same purpose, but
+# I wrote this before I knew that. Whoops.
+class CurlMock:
+ def __init__(self, headers = {}):
+ self.perform_was_called = False
+ self.headers = headers
+ self.get_response = 200
+ self.head_response = 200
+ self.req_headers = []
+
+ def setopt(self, op, *args):
+ if op == pycurl.URL:
+ self.url = args[0]
+ if op == pycurl.WRITEFUNCTION:
+ self.writefn = args[0]
+ if op == pycurl.HEADERFUNCTION:
+ self.headerfn = args[0]
+ if op == pycurl.NOBODY:
+ self.head = True
+ if op == pycurl.HTTPGET:
+ self.head = False
+ if op == pycurl.HTTPHEADER:
+ self.req_headers = args[0]
+
+ def getinfo(self, op):
+ if op == pycurl.RESPONSE_CODE:
+ if self.head:
+ return self.head_response
+ else:
+ return self.get_response
+
+ def perform(self):
+ self.perform_was_called = True
+
+ if self.head:
+ self.headerfn("HTTP/1.1 {} Status\r\n".format(self.head_response))
+ else:
+ self.headerfn("HTTP/1.1 {} Status\r\n".format(self.get_response))
+
+ for k,v in self.headers.items():
+ self.headerfn("%s: %s" % (k,v))
+
+ if not self.head and self.get_response == 200:
+ self.writefn(self.chunk)
+
+
+class TestHttpToKeep(unittest.TestCase):
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.Collection")
+ def test_http_get(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": []
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock()
+ mockobj.chunk = b'abc'
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
+ datetime.datetime(2018, 5, 15, 0, 0)))
+
+ assert mockobj.url == b"http://example.com/file1.txt"
+ assert mockobj.perform_was_called is True
+
+ cm.open.assert_called_with("file1.txt", "wb")
+ cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
+ owner_uuid=None, ensure_unique_name=True)
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+ ])
+
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_expires(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 17 May 2018 00:00:00 GMT'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock()
+ mockobj.chunk = b'abc'
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
+ datetime.datetime(2018, 5, 16, 0, 0)))
+
+ assert mockobj.perform_was_called is False
+
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_cache_control(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Cache-Control': 'max-age=172800'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock()
+ mockobj.chunk = b'abc'
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
+ 'http://example.com/file1.txt', datetime.datetime(2018, 5, 16, 0, 0)))
+
+ assert mockobj.perform_was_called is False
+
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.Collection")
+ def test_http_expired(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Wed, 16 May 2018 00:00:00 GMT'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz4"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999997+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock({'Date': 'Thu, 17 May 2018 00:00:00 GMT'})
+ mockobj.chunk = b'def'
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999997+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz4',
+ 'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
+
+
+ assert mockobj.url == b"http://example.com/file1.txt"
+ assert mockobj.perform_was_called is True
+
+ cm.open.assert_called_with("file1.txt", "wb")
+ cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
+ owner_uuid=None, ensure_unique_name=True)
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Thu, 17 May 2018 00:00:00 GMT'}}}})
+ ])
+
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_etag(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Wed, 16 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock({
+ 'Date': 'Thu, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Sat, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ })
+ mockobj.chunk = None
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
+ datetime.datetime(2018, 5, 17, 0, 0)))
+
+ cm.open.assert_not_called()
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'http://example.com/file1.txt': {
+ 'Date': 'Thu, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Sat, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }}}})
+ ])
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.Collection")
+ def test_http_content_disp(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": []
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock({"Content-Disposition": "attachment; filename=file1.txt"})
+ mockobj.chunk = "abc"
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+ r = http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
+ 'http://example.com/download?fn=/file1.txt',
+ datetime.datetime(2018, 5, 15, 0, 0)))
+
+ assert mockobj.url == b"http://example.com/download?fn=/file1.txt"
+
+ cm.open.assert_called_with("file1.txt", "wb")
+ cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Fdownload%3Ffn%3D%2Ffile1.txt",
+ owner_uuid=None, ensure_unique_name=True)
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {"http://example.com/download?fn=/file1.txt": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+ ])
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_etag_if_none_match(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock({
+ 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ })
+ mockobj.chunk = None
+ mockobj.head_response = 403
+ mockobj.get_response = 304
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt",
+ 'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',
+ datetime.datetime(2018, 5, 17, 0, 0)))
+
+ print(mockobj.req_headers)
+ assert mockobj.req_headers == ["Accept: application/octet-stream", "If-None-Match: \"123456\""]
+ cm.open.assert_not_called()
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'http://example.com/file1.txt': {
+ 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }}}})
+ ])
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_prefer_cached_downloads(self, collectionmock, curlmock):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ 'http://example.com/file1.txt': {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock()
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow, prefer_cached_downloads=True)
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
+ 'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
+
+ assert mockobj.perform_was_called is False
+ cm.open.assert_not_called()
+ api.collections().update.assert_not_called()
+
+ @mock.patch("pycurl.Curl")
+ @mock.patch("arvados.collection.CollectionReader")
+ def test_http_varying_url_params(self, collectionmock, curlmock):
+ for prurl in ("http://example.com/file1.txt", "http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789"):
+ api = mock.MagicMock()
+
+ api.collections().list().execute.return_value = {
+ "items": [{
+ "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+ "portable_data_hash": "99999999999999999999999999999998+99",
+ "properties": {
+ prurl: {
+ 'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }
+ }
+ }]
+ }
+
+ cm = mock.MagicMock()
+ cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+ cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+ cm.keys.return_value = ["file1.txt"]
+ collectionmock.return_value = cm
+
+ mockobj = CurlMock({
+ 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ })
+ mockobj.chunk = None
+ def init():
+ return mockobj
+ curlmock.side_effect = init
+
+ utcnow = mock.MagicMock()
+ utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+ r = http_to_keep(api, None, "http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789",
+ utcnow=utcnow, varying_url_params="KeyId,Signature,Expires")
+ self.assertEqual(r, ("99999999999999999999999999999998+99", "file1.txt", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',
+ 'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))
+
+ assert mockobj.perform_was_called is True
+ cm.open.assert_not_called()
+
+ api.collections().update.assert_has_calls([
+ mock.call(uuid=cm.manifest_locator(),
+ body={"collection":{"properties": {'http://example.com/file1.txt': {
+ 'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+ 'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+ 'Etag': '"123456"'
+ }}}})
+ ])
diff --git a/sdk/python/tests/test_keep_client.py b/sdk/python/tests/test_keep_client.py
index 605b90301c..5a065b2ee1 100644
--- a/sdk/python/tests/test_keep_client.py
+++ b/sdk/python/tests/test_keep_client.py
@@ -10,16 +10,25 @@ from builtins import str
from builtins import range
from builtins import object
import hashlib
-import mock
import os
+import errno
import pycurl
import random
import re
+import shutil
import socket
import sys
+import stat
+import tempfile
import time
import unittest
import urllib.parse
+import mmap
+
+from unittest import mock
+from unittest.mock import patch
+
+import parameterized
import arvados
import arvados.retry
@@ -28,17 +37,29 @@ from . import arvados_testutil as tutil
from . import keepstub
from . import run_test_server
-class KeepTestCase(run_test_server.TestCaseWithServers):
+from .arvados_testutil import DiskCacheBase
+
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
+ disk_cache = False
MAIN_SERVER = {}
KEEP_SERVER = {}
+ block_cache_test = None
@classmethod
def setUpClass(cls):
super(KeepTestCase, cls).setUpClass()
run_test_server.authorize_with("admin")
cls.api_client = arvados.api('v1')
+ cls.block_cache_test = DiskCacheBase()
cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
- proxy='', local_store='')
+ proxy='', local_store='',
+ block_cache=cls.block_cache_test.make_block_cache(cls.disk_cache))
+
+ @classmethod
+ def tearDownClass(cls):
+ super(KeepTestCase, cls).setUpClass()
+ cls.block_cache_test.tearDown()
def test_KeepBasicRWTest(self):
self.assertEqual(0, self.keep_client.upload_counter.get())
@@ -52,8 +73,8 @@ class KeepTestCase(run_test_server.TestCaseWithServers):
self.assertEqual(6, self.keep_client.upload_counter.get())
self.assertEqual(0, self.keep_client.download_counter.get())
- self.assertEqual(self.keep_client.get(foo_locator),
- b'foo',
+ self.assertTrue(tutil.binary_compare(self.keep_client.get(foo_locator),
+ b'foo'),
'wrong content from Keep.get(md5("foo"))')
self.assertEqual(3, self.keep_client.download_counter.get())
@@ -128,13 +149,18 @@ class KeepTestCase(run_test_server.TestCaseWithServers):
b'test_head',
'wrong content from Keep.get for "test_head"')
-class KeepPermissionTestCase(run_test_server.TestCaseWithServers):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepPermissionTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
+ disk_cache = False
MAIN_SERVER = {}
KEEP_SERVER = {'blob_signing': True}
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def test_KeepBasicRWTest(self):
run_test_server.authorize_with('active')
- keep_client = arvados.KeepClient()
+ keep_client = arvados.KeepClient(block_cache=self.make_block_cache(self.disk_cache))
foo_locator = keep_client.put('foo')
self.assertRegex(
foo_locator,
@@ -144,35 +170,36 @@ class KeepPermissionTestCase(run_test_server.TestCaseWithServers):
b'foo',
'wrong content from Keep.get(md5("foo"))')
- # GET with an unsigned locator => NotFound
+ # GET with an unsigned locator => bad request
bar_locator = keep_client.put('bar')
unsigned_bar_locator = "37b51d194a7513e45b56f6524f2d51f2+3"
self.assertRegex(
bar_locator,
r'^37b51d194a7513e45b56f6524f2d51f2\+3\+A[a-f0-9]+@[a-f0-9]+$',
'invalid locator from Keep.put("bar"): ' + bar_locator)
- self.assertRaises(arvados.errors.NotFoundError,
+ self.assertRaises(arvados.errors.KeepReadError,
keep_client.get,
unsigned_bar_locator)
- # GET from a different user => NotFound
+ # GET from a different user => bad request
run_test_server.authorize_with('spectator')
- self.assertRaises(arvados.errors.NotFoundError,
+ self.assertRaises(arvados.errors.KeepReadError,
arvados.Keep.get,
bar_locator)
- # Unauthenticated GET for a signed locator => NotFound
- # Unauthenticated GET for an unsigned locator => NotFound
+ # Unauthenticated GET for a signed locator => bad request
+ # Unauthenticated GET for an unsigned locator => bad request
keep_client.api_token = ''
- self.assertRaises(arvados.errors.NotFoundError,
+ self.assertRaises(arvados.errors.KeepReadError,
keep_client.get,
bar_locator)
- self.assertRaises(arvados.errors.NotFoundError,
+ self.assertRaises(arvados.errors.KeepReadError,
keep_client.get,
unsigned_bar_locator)
-
-class KeepProxyTestCase(run_test_server.TestCaseWithServers):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepProxyTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):
+ disk_cache = False
MAIN_SERVER = {}
KEEP_SERVER = {}
KEEP_PROXY_SERVER = {}
@@ -184,14 +211,14 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
cls.api_client = arvados.api('v1')
def tearDown(self):
- arvados.config.settings().pop('ARVADOS_EXTERNAL_CLIENT', None)
super(KeepProxyTestCase, self).tearDown()
+ DiskCacheBase.tearDown(self)
def test_KeepProxyTest1(self):
# Will use ARVADOS_KEEP_SERVICES environment variable that
# is set by setUpClass().
keep_client = arvados.KeepClient(api_client=self.api_client,
- local_store='')
+ local_store='', block_cache=self.make_block_cache(self.disk_cache))
baz_locator = keep_client.put('baz')
self.assertRegex(
baz_locator,
@@ -202,28 +229,13 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
'wrong content from Keep.get(md5("baz"))')
self.assertTrue(keep_client.using_proxy)
- def test_KeepProxyTest2(self):
- # Don't instantiate the proxy directly, but set the X-External-Client
- # header. The API server should direct us to the proxy.
- arvados.config.settings()['ARVADOS_EXTERNAL_CLIENT'] = 'true'
- keep_client = arvados.KeepClient(api_client=self.api_client,
- proxy='', local_store='')
- baz_locator = keep_client.put('baz2')
- self.assertRegex(
- baz_locator,
- '^91f372a266fe2bf2823cb8ec7fda31ce\+4',
- 'wrong md5 hash from Keep.put("baz2"): ' + baz_locator)
- self.assertEqual(keep_client.get(baz_locator),
- b'baz2',
- 'wrong content from Keep.get(md5("baz2"))')
- self.assertTrue(keep_client.using_proxy)
-
def test_KeepProxyTestMultipleURIs(self):
# Test using ARVADOS_KEEP_SERVICES env var overriding any
# existing proxy setting and setting multiple proxies
arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'http://10.0.0.1 https://foo.example.org:1234/'
keep_client = arvados.KeepClient(api_client=self.api_client,
- local_store='')
+ local_store='',
+ block_cache=self.make_block_cache(self.disk_cache))
uris = [x['_service_root'] for x in keep_client._keep_services]
self.assertEqual(uris, ['http://10.0.0.1/',
'https://foo.example.org:1234/'])
@@ -232,12 +244,18 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'bad.uri.org'
with self.assertRaises(arvados.errors.ArgumentError):
keep_client = arvados.KeepClient(api_client=self.api_client,
- local_store='')
+ local_store='',
+ block_cache=self.make_block_cache(self.disk_cache))
+
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
-class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
def get_service_roots(self, api_client):
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
services = keep_client.weighted_service_roots(arvados.KeepLocator('0'*32))
return [urllib.parse.urlparse(url) for url in sorted(services)]
@@ -254,13 +272,25 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertEqual('100::1', service.hostname)
self.assertEqual(10, service.port)
+ def test_recognize_proxy_services_in_controller_response(self):
+ keep_client = arvados.KeepClient(api_client=self.mock_keep_services(
+ service_type='proxy', service_host='localhost', service_port=9, count=1),
+ block_cache=self.make_block_cache(self.disk_cache))
+ try:
+ # this will fail, but it ensures we get the service
+ # discovery response
+ keep_client.put('baz2', num_retries=0)
+ except:
+ pass
+ self.assertTrue(keep_client.using_proxy)
+
def test_insecure_disables_tls_verify(self):
api_client = self.mock_keep_services(count=1)
force_timeout = socket.timeout("timed out")
api_client.insecure = True
with tutil.mock_keep_responses(b'foo', 200) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
self.assertEqual(
mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
@@ -271,7 +301,7 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client.insecure = False
with tutil.mock_keep_responses(b'foo', 200) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
# getopt()==None here means we didn't change the
# default. If we were using real pycurl instead of a mock,
@@ -292,7 +322,7 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
headers = {'X-Keep-Locator':local_loc}
with tutil.mock_keep_responses('', 200, **headers):
# Check that the translated locator gets returned
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
self.assertEqual(local_loc, keep_client.refresh_signature(remote_loc))
# Check that refresh_signature() uses the correct method and headers
keep_client._get_or_head = mock.MagicMock()
@@ -311,7 +341,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.get('ffffffffffffffffffffffffffffffff')
self.assertEqual(
@@ -328,7 +362,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepWriteError):
keep_client.put(b'foo')
self.assertEqual(
@@ -345,7 +383,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.head('ffffffffffffffffffffffffffffffff')
self.assertEqual(
@@ -362,7 +404,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(service_type='proxy', count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.get('ffffffffffffffffffffffffffffffff')
self.assertEqual(
@@ -379,7 +425,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(service_type='proxy', count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.head('ffffffffffffffffffffffffffffffff')
self.assertEqual(
@@ -393,10 +443,14 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
None)
def test_proxy_put_timeout(self):
+ self.disk_cache_dir = None
api_client = self.mock_keep_services(service_type='proxy', count=1)
force_timeout = socket.timeout("timed out")
with tutil.mock_keep_responses(force_timeout, 0) as mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ num_retries=0,
+ )
with self.assertRaises(arvados.errors.KeepWriteError):
keep_client.put('foo')
self.assertEqual(
@@ -413,7 +467,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = mock.MagicMock(name='api_client')
api_client.keep_services().accessible().execute.side_effect = (
arvados.errors.ApiError)
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
with self.assertRaises(exc_class) as err_check:
getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0')
self.assertEqual(0, len(err_check.exception.request_errors()))
@@ -433,7 +491,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
"retry error reporting test", 500, 500, 500, 500, 500, 500, 502, 502)
with req_mock, tutil.skip_sleep, \
self.assertRaises(exc_class) as err_check:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0',
num_retries=3)
self.assertEqual([502, 502], [
@@ -456,7 +518,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(count=3)
with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
keep_client.put(data)
self.assertEqual(2, len(exc_check.exception.request_errors()))
@@ -466,8 +532,12 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
api_client = self.mock_keep_services(service_type='proxy', read_only=True, count=1)
with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
- keep_client = arvados.KeepClient(api_client=api_client)
- keep_client.put(data)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
+ keep_client.put(data)
self.assertEqual(True, ("no Keep services available" in str(exc_check.exception)))
self.assertEqual(0, len(exc_check.exception.request_errors()))
@@ -475,7 +545,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
body = b'oddball service get'
api_client = self.mock_keep_services(service_type='fancynewblobstore')
with tutil.mock_keep_responses(body, 200):
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
actual = keep_client.get(tutil.str_keep_locator(body))
self.assertEqual(body, actual)
@@ -484,7 +558,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
pdh = tutil.str_keep_locator(body)
api_client = self.mock_keep_services(service_type='fancynewblobstore')
with tutil.mock_keep_responses(pdh, 200):
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
actual = keep_client.put(body, copies=1)
self.assertEqual(pdh, actual)
@@ -496,20 +574,29 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
headers = {'x-keep-replicas-stored': 3}
with tutil.mock_keep_responses(pdh, 200, 418, 418, 418,
**headers) as req_mock:
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(
+ api_client=api_client,
+ block_cache=self.make_block_cache(self.disk_cache),
+ num_retries=0,
+ )
actual = keep_client.put(body, copies=2)
self.assertEqual(pdh, actual)
self.assertEqual(1, req_mock.call_count)
-
@tutil.skip_sleep
-class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+
def setUp(self):
self.api_client = self.mock_keep_services(count=2)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
self.data = b'xyzzy'
self.locator = '1271ed5ef305aadabc605b1609e24c52'
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
@mock.patch('arvados.KeepClient.KeepService.get')
def test_get_request_cache(self, get_mock):
with tutil.mock_keep_responses(self.data, 200, 200):
@@ -530,130 +617,26 @@ class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
def test_head_and_then_get_return_different_responses(self, get_mock):
head_resp = None
get_resp = None
- get_mock.side_effect = ['first response', 'second response']
+ get_mock.side_effect = [b'first response', b'second response']
with tutil.mock_keep_responses(self.data, 200, 200):
head_resp = self.keep_client.head(self.locator)
get_resp = self.keep_client.get(self.locator)
- self.assertEqual('first response', head_resp)
+ self.assertEqual(b'first response', head_resp)
# First reponse was not cached because it was from a HEAD request.
self.assertNotEqual(head_resp, get_resp)
-@tutil.skip_sleep
-class KeepStorageClassesTestCase(unittest.TestCase, tutil.ApiClientMock):
- def setUp(self):
- self.api_client = self.mock_keep_services(count=2)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
- self.data = b'xyzzy'
- self.locator = '1271ed5ef305aadabc605b1609e24c52'
- def test_multiple_default_storage_classes_req_header(self):
- api_mock = self.api_client_mock()
- api_mock.config.return_value = {
- 'StorageClasses': {
- 'foo': { 'Default': True },
- 'bar': { 'Default': True },
- 'baz': { 'Default': False }
- }
- }
- api_client = self.mock_keep_services(api_mock=api_mock, count=2)
- keep_client = arvados.KeepClient(api_client=api_client)
- resp_hdr = {
- 'x-keep-storage-classes-confirmed': 'foo=1, bar=1',
- 'x-keep-replicas-stored': 1
- }
- with tutil.mock_keep_responses(self.locator, 200, **resp_hdr) as mock:
- keep_client.put(self.data, copies=1)
- req_hdr = mock.responses[0]
- self.assertIn(
- 'X-Keep-Storage-Classes: bar, foo', req_hdr.getopt(pycurl.HTTPHEADER))
-
- def test_storage_classes_req_header(self):
- self.assertEqual(
- self.api_client.config()['StorageClasses'],
- {'default': {'Default': True}})
- cases = [
- # requested, expected
- [['foo'], 'X-Keep-Storage-Classes: foo'],
- [['bar', 'foo'], 'X-Keep-Storage-Classes: bar, foo'],
- [[], 'X-Keep-Storage-Classes: default'],
- [None, 'X-Keep-Storage-Classes: default'],
- ]
- for req_classes, expected_header in cases:
- headers = {'x-keep-replicas-stored': 1}
- if req_classes is None or len(req_classes) == 0:
- confirmed_hdr = 'default=1'
- elif len(req_classes) > 0:
- confirmed_hdr = ', '.join(["{}=1".format(cls) for cls in req_classes])
- headers.update({'x-keep-storage-classes-confirmed': confirmed_hdr})
- with tutil.mock_keep_responses(self.locator, 200, **headers) as mock:
- self.keep_client.put(self.data, copies=1, classes=req_classes)
- req_hdr = mock.responses[0]
- self.assertIn(expected_header, req_hdr.getopt(pycurl.HTTPHEADER))
-
- def test_partial_storage_classes_put(self):
- headers = {
- 'x-keep-replicas-stored': 1,
- 'x-keep-storage-classes-confirmed': 'foo=1'}
- with tutil.mock_keep_responses(self.locator, 200, 503, **headers) as mock:
- with self.assertRaises(arvados.errors.KeepWriteError):
- self.keep_client.put(self.data, copies=1, classes=['foo', 'bar'])
- # 1st request, both classes pending
- req1_headers = mock.responses[0].getopt(pycurl.HTTPHEADER)
- self.assertIn('X-Keep-Storage-Classes: bar, foo', req1_headers)
- # 2nd try, 'foo' class already satisfied
- req2_headers = mock.responses[1].getopt(pycurl.HTTPHEADER)
- self.assertIn('X-Keep-Storage-Classes: bar', req2_headers)
-
- def test_successful_storage_classes_put_requests(self):
- cases = [
- # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, expected_requests
- [ 1, ['foo'], 1, 'foo=1', 1],
- [ 1, ['foo'], 2, 'foo=2', 1],
- [ 2, ['foo'], 2, 'foo=2', 1],
- [ 2, ['foo'], 1, 'foo=1', 2],
- [ 1, ['foo', 'bar'], 1, 'foo=1, bar=1', 1],
- [ 1, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],
- [ 2, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],
- [ 2, ['foo', 'bar'], 1, 'foo=1, bar=1', 2],
- [ 1, ['foo', 'bar'], 1, None, 1],
- [ 1, ['foo'], 1, None, 1],
- [ 2, ['foo'], 2, None, 1],
- [ 2, ['foo'], 1, None, 2],
- ]
- for w_copies, w_classes, c_copies, c_classes, e_reqs in cases:
- headers = {'x-keep-replicas-stored': c_copies}
- if c_classes is not None:
- headers.update({'x-keep-storage-classes-confirmed': c_classes})
- with tutil.mock_keep_responses(self.locator, 200, 200, **headers) as mock:
- case_desc = 'wanted_copies={}, wanted_classes="{}", confirmed_copies={}, confirmed_classes="{}", expected_requests={}'.format(w_copies, ', '.join(w_classes), c_copies, c_classes, e_reqs)
- self.assertEqual(self.locator,
- self.keep_client.put(self.data, copies=w_copies, classes=w_classes),
- case_desc)
- self.assertEqual(e_reqs, mock.call_count, case_desc)
-
- def test_failed_storage_classes_put_requests(self):
- cases = [
- # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, return_code
- [ 1, ['foo'], 1, 'bar=1', 200],
- [ 1, ['foo'], 1, None, 503],
- [ 2, ['foo'], 1, 'bar=1, foo=0', 200],
- [ 3, ['foo'], 1, 'bar=1, foo=1', 200],
- [ 3, ['foo', 'bar'], 1, 'bar=2, foo=1', 200],
- ]
- for w_copies, w_classes, c_copies, c_classes, return_code in cases:
- headers = {'x-keep-replicas-stored': c_copies}
- if c_classes is not None:
- headers.update({'x-keep-storage-classes-confirmed': c_classes})
- with tutil.mock_keep_responses(self.locator, return_code, return_code, **headers):
- case_desc = 'wanted_copies={}, wanted_classes="{}", confirmed_copies={}, confirmed_classes="{}"'.format(w_copies, ', '.join(w_classes), c_copies, c_classes)
- with self.assertRaises(arvados.errors.KeepWriteError, msg=case_desc):
- self.keep_client.put(self.data, copies=w_copies, classes=w_classes)
+
+
@tutil.skip_sleep
-class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+
def setUp(self):
self.api_client = self.mock_keep_services(count=2)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
self.data = b'xyzzy'
self.locator = '1271ed5ef305aadabc605b1609e24c52'
self.test_id = arvados.util.new_request_id()
@@ -663,6 +646,9 @@ class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock):
# id='123456789'>:
self.api_client.request_id = None
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def test_default_to_api_client_request_id(self):
self.api_client.request_id = self.test_id
with tutil.mock_keep_responses(self.locator, 200, 200) as mock:
@@ -738,7 +724,9 @@ class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock):
@tutil.skip_sleep
-class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
+#@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
def setUp(self):
# expected_order[i] is the probe order for
@@ -761,7 +749,10 @@ class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
hashlib.md5(self.blocks[x]).hexdigest()
for x in range(len(self.expected_order))]
self.api_client = self.mock_keep_services(count=self.services)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
+
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
def test_weighted_service_roots_against_reference_set(self):
# Confirm weighted_service_roots() returns the correct order
@@ -834,12 +825,12 @@ class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
hashlib.md5("{:064x}".format(x).encode()).hexdigest() for x in range(100)]
initial_services = 12
self.api_client = self.mock_keep_services(count=initial_services)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
probes_before = [
self.keep_client.weighted_service_roots(arvados.KeepLocator(hash)) for hash in hashes]
for added_services in range(1, 12):
api_client = self.mock_keep_services(count=initial_services+added_services)
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
total_penalty = 0
for hash_index in range(len(hashes)):
probe_after = keep_client.weighted_service_roots(
@@ -875,7 +866,7 @@ class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
# Arbitrary port number:
aport = random.randint(1024,65535)
api_client = self.mock_keep_services(service_port=aport, count=self.services)
- keep_client = arvados.KeepClient(api_client=api_client)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
with mock.patch('pycurl.Curl') as curl_mock, \
self.assertRaises(exc_class) as err_check:
curl_mock.return_value = tutil.FakeCurl.make(code=500, body=b'')
@@ -891,8 +882,10 @@ class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
def test_put_error_shows_probe_order(self):
self.check_64_zeros_error_order('put', arvados.errors.KeepWriteError)
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase, DiskCacheBase):
+ disk_cache = False
-class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
# BANDWIDTH_LOW_LIM must be less than len(DATA) so we can transfer
# 1s worth of data and then trigger bandwidth errors before running
# out of data.
@@ -900,6 +893,9 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
BANDWIDTH_LOW_LIM = 1024
TIMEOUT_TIME = 1.0
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
class assertTakesBetween(unittest.TestCase):
def __init__(self, tmin, tmax):
self.tmin = tmin
@@ -929,7 +925,7 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
def keepClient(self, timeouts=(0.1, TIMEOUT_TIME, BANDWIDTH_LOW_LIM)):
return arvados.KeepClient(
api_client=self.api_client,
- timeout=timeouts)
+ timeout=timeouts, block_cache=self.make_block_cache(self.disk_cache))
def test_timeout_slow_connect(self):
# Can't simulate TCP delays with our own socket. Leave our
@@ -1033,8 +1029,13 @@ class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
with self.assertRaises(arvados.errors.KeepWriteError):
kc.put(self.DATA, copies=1, num_retries=0)
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
-class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock):
def mock_disks_and_gateways(self, disks=3, gateways=1):
self.gateways = [{
'uuid': 'zzzzz-bi6l4-gateway{:08d}'.format(i),
@@ -1049,7 +1050,7 @@ class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock):
for gw in self.gateways]
self.api_client = self.mock_keep_services(
count=disks, additional_services=self.gateways)
- self.keepClient = arvados.KeepClient(api_client=self.api_client)
+ self.keepClient = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
@mock.patch('pycurl.Curl')
def test_get_with_gateway_hint_first(self, MockCurl):
@@ -1130,8 +1131,9 @@ class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock):
self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
MockCurl.return_value.getopt(pycurl.URL).decode())
-
class KeepClientRetryTestMixin(object):
+ disk_cache = False
+
# Testing with a local Keep store won't exercise the retry behavior.
# Instead, our strategy is:
# * Create a client with one proxy specified (pointed at a black
@@ -1156,6 +1158,7 @@ class KeepClientRetryTestMixin(object):
def new_client(self, **caller_kwargs):
kwargs = self.client_kwargs.copy()
kwargs.update(caller_kwargs)
+ kwargs['block_cache'] = self.make_block_cache(self.disk_cache)
return arvados.KeepClient(**kwargs)
def run_method(self, *args, **kwargs):
@@ -1185,10 +1188,6 @@ class KeepClientRetryTestMixin(object):
with self.TEST_PATCHER(self.DEFAULT_EXPECT, Exception('mock err'), 200):
self.check_success(num_retries=3)
- def test_no_default_retry(self):
- with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
- self.check_exception()
-
def test_no_retry_after_permanent_error(self):
with self.TEST_PATCHER(self.DEFAULT_EXPECT, 403, 200):
self.check_exception(num_retries=3)
@@ -1205,12 +1204,16 @@ class KeepClientRetryTestMixin(object):
@tutil.skip_sleep
-class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):
DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_DATA
DEFAULT_EXCEPTION = arvados.errors.KeepReadError
HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
*args, **kwargs):
return self.new_client().get(locator, *args, **kwargs)
@@ -1224,7 +1227,7 @@ class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
# and a high threshold of servers report that it's not found.
# This test rigs up 50/50 disagreement between two servers, and
# checks that it does not become a NotFoundError.
- client = self.new_client()
+ client = self.new_client(num_retries=0)
with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):
with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
client.get(self.HINTED_LOCATOR)
@@ -1249,12 +1252,16 @@ class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
self.check_success(locator=self.HINTED_LOCATOR)
@tutil.skip_sleep
-class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):
DEFAULT_EXPECT = True
DEFAULT_EXCEPTION = arvados.errors.KeepReadError
HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
*args, **kwargs):
return self.new_client().head(locator, *args, **kwargs)
@@ -1268,7 +1275,7 @@ class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase):
# and a high threshold of servers report that it's not found.
# This test rigs up 50/50 disagreement between two servers, and
# checks that it does not become a NotFoundError.
- client = self.new_client()
+ client = self.new_client(num_retries=0)
with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):
with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
client.head(self.HINTED_LOCATOR)
@@ -1287,11 +1294,15 @@ class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase):
self.check_success(locator=self.HINTED_LOCATOR)
@tutil.skip_sleep
-class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):
DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_LOCATOR
DEFAULT_EXCEPTION = arvados.errors.KeepWriteError
TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def run_method(self, data=KeepClientRetryTestMixin.TEST_DATA,
copies=1, *args, **kwargs):
return self.new_client().put(data, copies, *args, **kwargs)
@@ -1375,13 +1386,19 @@ class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
@tutil.skip_sleep
-class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+
# Test put()s that need two distinct servers to succeed, possibly
# requiring multiple passes through the retry loop.
def setUp(self):
self.api_client = self.mock_keep_services(count=2)
- self.keep_client = arvados.KeepClient(api_client=self.api_client)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
+
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
def test_success_after_exception(self):
with tutil.mock_keep_responses(
@@ -1408,7 +1425,13 @@ class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock):
self.keep_client.put('foo', num_retries=1, copies=2)
self.assertEqual(2, req_mock.call_count)
-class KeepClientAPIErrorTest(unittest.TestCase):
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepClientAPIErrorTest(unittest.TestCase, DiskCacheBase):
+ disk_cache = False
+
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
def test_api_fail(self):
class ApiMock(object):
def __getattr__(self, r):
@@ -1421,7 +1444,8 @@ class KeepClientAPIErrorTest(unittest.TestCase):
else:
raise arvados.errors.KeepReadError()
keep_client = arvados.KeepClient(api_client=ApiMock(),
- proxy='', local_store='')
+ proxy='', local_store='',
+ block_cache=self.make_block_cache(self.disk_cache))
# The bug this is testing for is that if an API (not
# keepstore) exception is thrown as part of a get(), the next
@@ -1435,3 +1459,252 @@ class KeepClientAPIErrorTest(unittest.TestCase):
keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8+3")
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+
+class KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
+ def setUp(self):
+ self.api_client = self.mock_keep_services(count=2)
+ self.data = b'xyzzy'
+ self.locator = '1271ed5ef305aadabc605b1609e24c52'
+ self.disk_cache_dir = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.disk_cache_dir)
+
+
+ @mock.patch('arvados.KeepClient.KeepService.get')
+ def test_disk_cache_read(self, get_mock):
+ # confirm it finds an existing cache block when the cache is
+ # initialized.
+
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "wb") as f:
+ f.write(self.data)
+
+ # block cache should have found the existing block
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
+
+ get_mock.assert_not_called()
+
+
+ @mock.patch('arvados.KeepClient.KeepService.get')
+ def test_disk_cache_share(self, get_mock):
+ # confirm it finds a cache block written after the disk cache
+ # was initialized.
+
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "wb") as f:
+ f.write(self.data)
+
+ # when we try to get the block, it'll check the disk and find it.
+ self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
+
+ get_mock.assert_not_called()
+
+
+ def test_disk_cache_write(self):
+ # confirm the cache block was created
+
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ with tutil.mock_keep_responses(self.data, 200) as mock:
+ self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
+
+ self.assertIsNotNone(keep_client.get_from_cache(self.locator))
+
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "rb") as f:
+ self.assertTrue(tutil.binary_compare(f.read(), self.data))
+
+
+ def test_disk_cache_clean(self):
+ # confirm that a tmp file in the cache is cleaned up
+
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC.keepcacheblock"), "wb") as f:
+ f.write(b"abc1")
+
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC"), "wb") as f:
+ f.write(b"abc2")
+
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC"), "wb") as f:
+ f.write(b"abc3")
+
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC.keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC")))
+
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+ # The tmp still hasn't been deleted because it was created in the last 60 seconds
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC.keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC")))
+
+ # Set the mtime to 61s in the past
+ os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC.keepcacheblock"), times=(time.time()-61, time.time()-61))
+ os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC"), times=(time.time()-61, time.time()-61))
+ os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC"), times=(time.time()-61, time.time()-61))
+
+ block_cache2 = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+ # Tmp should be gone but the other ones are safe.
+ self.assertFalse(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC.keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "tmpXYZABC")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], "XYZABC")))
+
+
+ @mock.patch('arvados.KeepClient.KeepService.get')
+ def test_disk_cache_cap(self, get_mock):
+ # confirm that the cache is kept to the desired limit
+
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "wb") as f:
+ f.write(self.data)
+
+ os.makedirs(os.path.join(self.disk_cache_dir, "acb"))
+ with open(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock"), "wb") as f:
+ f.write(b"foo")
+
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
+
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir,
+ max_slots=1)
+
+ self.assertFalse(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
+
+
+ @mock.patch('arvados.KeepClient.KeepService.get')
+ def test_disk_cache_share(self, get_mock):
+ # confirm that a second cache doesn't delete files that belong to the first cache.
+
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "wb") as f:
+ f.write(self.data)
+
+ os.makedirs(os.path.join(self.disk_cache_dir, "acb"))
+ with open(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock"), "wb") as f:
+ f.write(b"foo")
+
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
+
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir,
+ max_slots=2)
+
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
+
+ block_cache2 = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir,
+ max_slots=1)
+
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock")))
+ self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, "acb", "acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock")))
+
+
+
+ def test_disk_cache_error(self):
+ os.chmod(self.disk_cache_dir, stat.S_IRUSR)
+
+ # Fail during cache initialization.
+ with self.assertRaises(OSError):
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+
+ def test_disk_cache_write_error(self):
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ # Make the cache dir read-only
+ os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))
+ os.chmod(os.path.join(self.disk_cache_dir, self.locator[0:3]), stat.S_IRUSR)
+
+ # Cache fails
+ with self.assertRaises(arvados.errors.KeepCacheError):
+ with tutil.mock_keep_responses(self.data, 200) as mock:
+ keep_client.get(self.locator)
+
+
+ def test_disk_cache_retry_write_error(self):
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ called = False
+ realmmap = mmap.mmap
+ def sideeffect_mmap(*args, **kwargs):
+ nonlocal called
+ if not called:
+ called = True
+ raise OSError(errno.ENOSPC, "no space")
+ else:
+ return realmmap(*args, **kwargs)
+
+ with patch('mmap.mmap') as mockmmap:
+ mockmmap.side_effect = sideeffect_mmap
+
+ cache_max_before = block_cache.cache_max
+
+ with tutil.mock_keep_responses(self.data, 200) as mock:
+ self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
+
+ self.assertIsNotNone(keep_client.get_from_cache(self.locator))
+
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "rb") as f:
+ self.assertTrue(tutil.binary_compare(f.read(), self.data))
+
+ # shrank the cache in response to ENOSPC
+ self.assertTrue(cache_max_before > block_cache.cache_max)
+
+
+ def test_disk_cache_retry_write_error2(self):
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=True,
+ disk_cache_dir=self.disk_cache_dir)
+
+ keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)
+
+ called = False
+ realmmap = mmap.mmap
+ def sideeffect_mmap(*args, **kwargs):
+ nonlocal called
+ if not called:
+ called = True
+ raise OSError(errno.ENOMEM, "no memory")
+ else:
+ return realmmap(*args, **kwargs)
+
+ with patch('mmap.mmap') as mockmmap:
+ mockmmap.side_effect = sideeffect_mmap
+
+ slots_before = block_cache._max_slots
+
+ with tutil.mock_keep_responses(self.data, 200) as mock:
+ self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))
+
+ self.assertIsNotNone(keep_client.get_from_cache(self.locator))
+
+ with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+".keepcacheblock"), "rb") as f:
+ self.assertTrue(tutil.binary_compare(f.read(), self.data))
+
+ # shrank the cache in response to ENOMEM
+ self.assertTrue(slots_before > block_cache._max_slots)
diff --git a/sdk/python/tests/test_retry.py b/sdk/python/tests/test_retry.py
index 2d02005937..3f0064f96f 100644
--- a/sdk/python/tests/test_retry.py
+++ b/sdk/python/tests/test_retry.py
@@ -8,9 +8,10 @@ from builtins import object
import itertools
import unittest
+from unittest import mock
+
import arvados.errors as arv_error
import arvados.retry as arv_retry
-import mock
class RetryLoopTestMixin(object):
@staticmethod
@@ -174,14 +175,14 @@ class CheckHTTPResponseSuccessTestCase(unittest.TestCase):
self.check_is(True, *list(range(200, 207)))
def test_obvious_stops(self):
- self.check_is(False, 424, 426, 428, 431,
+ self.check_is(False, 422, 424, 426, 428, 431,
*list(range(400, 408)) + list(range(410, 420)))
def test_obvious_retries(self):
self.check_is(None, 500, 502, 503, 504)
def test_4xx_retries(self):
- self.check_is(None, 408, 409, 422, 423)
+ self.check_is(None, 408, 409, 423)
def test_5xx_failures(self):
self.check_is(False, 501, *list(range(505, 512)))
diff --git a/sdk/python/tests/test_retry_job_helpers.py b/sdk/python/tests/test_retry_job_helpers.py
index 76c62cb0ce..f4e80e61fe 100644
--- a/sdk/python/tests/test_retry_job_helpers.py
+++ b/sdk/python/tests/test_retry_job_helpers.py
@@ -4,7 +4,6 @@
from __future__ import absolute_import
from builtins import object
-import mock
import os
import unittest
import hashlib
@@ -14,6 +13,7 @@ import arvados
from . import arvados_testutil as tutil
from apiclient import http as apiclient_http
+from unittest import mock
@tutil.skip_sleep
class ApiClientRetryTestMixin(object):
@@ -28,7 +28,7 @@ class ApiClientRetryTestMixin(object):
def setUp(self):
# Patch arvados.api() to return our mock API, so we can mock
# its http requests.
- self.api_client = arvados.api('v1', cache=False)
+ self.api_client = arvados.api('v1', cache=False, num_retries=0)
self.api_patch = mock.patch('arvados.api', return_value=self.api_client)
self.api_patch.start()
diff --git a/sdk/python/tests/test_safeapi.py b/sdk/python/tests/test_safeapi.py
new file mode 100644
index 0000000000..a41219e9c5
--- /dev/null
+++ b/sdk/python/tests/test_safeapi.py
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import unittest
+
+import googleapiclient
+
+from arvados import safeapi
+
+from . import run_test_server
+
+class SafeApiTest(run_test_server.TestCaseWithServers):
+ MAIN_SERVER = {}
+
+ def test_constructor(self):
+ env_mapping = {
+ key: value
+ for key, value in os.environ.items()
+ if key.startswith('ARVADOS_API_')
+ }
+ extra_params = {
+ 'timeout': 299,
+ }
+ base_params = {
+ key[12:].lower(): value
+ for key, value in env_mapping.items()
+ }
+ try:
+ base_params['insecure'] = base_params.pop('host_insecure')
+ except KeyError:
+ pass
+ expected_keep_params = {}
+ for config, params, subtest in [
+ (None, {}, "default arguments"),
+ (None, extra_params, "extra params"),
+ (env_mapping, {}, "explicit config"),
+ (env_mapping, extra_params, "explicit config and params"),
+ ({}, base_params, "params only"),
+ ]:
+ with self.subTest(f"test constructor with {subtest}"):
+ expected_timeout = params.get('timeout', 300)
+ expected_params = dict(params)
+ keep_params = dict(expected_keep_params)
+ client = safeapi.ThreadSafeApiCache(config, keep_params, params, 'v1')
+ self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
+ self.assertEqual(client._http.timeout, expected_timeout)
+ self.assertEqual(params, expected_params,
+ "api_params was modified in-place")
+ self.assertEqual(keep_params, expected_keep_params,
+ "keep_params was modified in-place")
+
+ def test_constructor_no_args(self):
+ client = safeapi.ThreadSafeApiCache()
+ self.assertTrue(hasattr(client, 'localapi'), "client missing localapi method")
+ self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])
+ self.assertTrue(client.insecure)
+
+ def test_constructor_bad_version(self):
+ with self.assertRaises(googleapiclient.errors.UnknownApiNameOrVersion):
+ safeapi.ThreadSafeApiCache(version='BadTestVersion')
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
index 41add57c0e..4ef81c53d8 100644
--- a/sdk/python/tests/test_sdk.py
+++ b/sdk/python/tests/test_sdk.py
@@ -2,10 +2,11 @@
#
# SPDX-License-Identifier: Apache-2.0
-import mock
import os
import unittest
+from unittest import mock
+
import arvados
import arvados.collection
diff --git a/sdk/python/tests/test_storage_classes.py b/sdk/python/tests/test_storage_classes.py
new file mode 100644
index 0000000000..21bacc310a
--- /dev/null
+++ b/sdk/python/tests/test_storage_classes.py
@@ -0,0 +1,128 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import pycurl
+
+import unittest
+import parameterized
+from . import arvados_testutil as tutil
+from .arvados_testutil import DiskCacheBase
+
+@tutil.skip_sleep
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
+class KeepStorageClassesTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):
+ disk_cache = False
+
+ def setUp(self):
+ self.api_client = self.mock_keep_services(count=2)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))
+ self.data = b'xyzzy'
+ self.locator = '1271ed5ef305aadabc605b1609e24c52'
+
+ def tearDown(self):
+ DiskCacheBase.tearDown(self)
+
+ def test_multiple_default_storage_classes_req_header(self):
+ api_mock = self.api_client_mock()
+ api_mock.config.return_value = {
+ 'StorageClasses': {
+ 'foo': { 'Default': True },
+ 'bar': { 'Default': True },
+ 'baz': { 'Default': False }
+ }
+ }
+ api_client = self.mock_keep_services(api_mock=api_mock, count=2)
+ keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))
+ resp_hdr = {
+ 'x-keep-storage-classes-confirmed': 'foo=1, bar=1',
+ 'x-keep-replicas-stored': 1
+ }
+ with tutil.mock_keep_responses(self.locator, 200, **resp_hdr) as mock:
+ keep_client.put(self.data, copies=1)
+ req_hdr = mock.responses[0]
+ self.assertIn(
+ 'X-Keep-Storage-Classes: bar, foo', req_hdr.getopt(pycurl.HTTPHEADER))
+
+ def test_storage_classes_req_header(self):
+ self.assertEqual(
+ self.api_client.config()['StorageClasses'],
+ {'default': {'Default': True}})
+ cases = [
+ # requested, expected
+ [['foo'], 'X-Keep-Storage-Classes: foo'],
+ [['bar', 'foo'], 'X-Keep-Storage-Classes: bar, foo'],
+ [[], 'X-Keep-Storage-Classes: default'],
+ [None, 'X-Keep-Storage-Classes: default'],
+ ]
+ for req_classes, expected_header in cases:
+ headers = {'x-keep-replicas-stored': 1}
+ if req_classes is None or len(req_classes) == 0:
+ confirmed_hdr = 'default=1'
+ elif len(req_classes) > 0:
+ confirmed_hdr = ', '.join(["{}=1".format(cls) for cls in req_classes])
+ headers.update({'x-keep-storage-classes-confirmed': confirmed_hdr})
+ with tutil.mock_keep_responses(self.locator, 200, **headers) as mock:
+ self.keep_client.put(self.data, copies=1, classes=req_classes)
+ req_hdr = mock.responses[0]
+ self.assertIn(expected_header, req_hdr.getopt(pycurl.HTTPHEADER))
+
+ def test_partial_storage_classes_put(self):
+ headers = {
+ 'x-keep-replicas-stored': 1,
+ 'x-keep-storage-classes-confirmed': 'foo=1'}
+ with tutil.mock_keep_responses(self.locator, 200, 503, **headers) as mock:
+ with self.assertRaises(arvados.errors.KeepWriteError):
+ self.keep_client.put(self.data, copies=1, classes=['foo', 'bar'], num_retries=0)
+ # 1st request, both classes pending
+ req1_headers = mock.responses[0].getopt(pycurl.HTTPHEADER)
+ self.assertIn('X-Keep-Storage-Classes: bar, foo', req1_headers)
+ # 2nd try, 'foo' class already satisfied
+ req2_headers = mock.responses[1].getopt(pycurl.HTTPHEADER)
+ self.assertIn('X-Keep-Storage-Classes: bar', req2_headers)
+
+ def test_successful_storage_classes_put_requests(self):
+ cases = [
+ # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, expected_requests
+ [ 1, ['foo'], 1, 'foo=1', 1],
+ [ 1, ['foo'], 2, 'foo=2', 1],
+ [ 2, ['foo'], 2, 'foo=2', 1],
+ [ 2, ['foo'], 1, 'foo=1', 2],
+ [ 1, ['foo', 'bar'], 1, 'foo=1, bar=1', 1],
+ [ 1, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],
+ [ 2, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],
+ [ 2, ['foo', 'bar'], 1, 'foo=1, bar=1', 2],
+ [ 1, ['foo', 'bar'], 1, None, 1],
+ [ 1, ['foo'], 1, None, 1],
+ [ 2, ['foo'], 2, None, 1],
+ [ 2, ['foo'], 1, None, 2],
+ ]
+ for w_copies, w_classes, c_copies, c_classes, e_reqs in cases:
+ headers = {'x-keep-replicas-stored': c_copies}
+ if c_classes is not None:
+ headers.update({'x-keep-storage-classes-confirmed': c_classes})
+ with tutil.mock_keep_responses(self.locator, 200, 200, **headers) as mock:
+ case_desc = 'wanted_copies={}, wanted_classes="{}", confirmed_copies={}, confirmed_classes="{}", expected_requests={}'.format(w_copies, ', '.join(w_classes), c_copies, c_classes, e_reqs)
+ self.assertEqual(self.locator,
+ self.keep_client.put(self.data, copies=w_copies, classes=w_classes),
+ case_desc)
+ self.assertEqual(e_reqs, mock.call_count, case_desc)
+
+ def test_failed_storage_classes_put_requests(self):
+ cases = [
+ # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, return_code
+ [ 1, ['foo'], 1, 'bar=1', 200],
+ [ 1, ['foo'], 1, None, 503],
+ [ 2, ['foo'], 1, 'bar=1, foo=0', 200],
+ [ 3, ['foo'], 1, 'bar=1, foo=1', 200],
+ [ 3, ['foo', 'bar'], 1, 'bar=2, foo=1', 200],
+ ]
+ for w_copies, w_classes, c_copies, c_classes, return_code in cases:
+ headers = {'x-keep-replicas-stored': c_copies}
+ if c_classes is not None:
+ headers.update({'x-keep-storage-classes-confirmed': c_classes})
+ with tutil.mock_keep_responses(self.locator, return_code, return_code, **headers):
+ case_desc = 'wanted_copies={}, wanted_classes="{}", confirmed_copies={}, confirmed_classes="{}"'.format(w_copies, ', '.join(w_classes), c_copies, c_classes)
+ with self.assertRaises(arvados.errors.KeepWriteError, msg=case_desc):
+ self.keep_client.put(self.data, copies=w_copies, classes=w_classes, num_retries=0)
diff --git a/sdk/python/tests/test_stream.py b/sdk/python/tests/test_stream.py
index dc84a037f8..374800c55b 100644
--- a/sdk/python/tests/test_stream.py
+++ b/sdk/python/tests/test_stream.py
@@ -7,11 +7,12 @@ from builtins import object
import bz2
import gzip
import io
-import mock
import os
import unittest
import hashlib
+from unittest import mock
+
import arvados
from arvados import StreamReader, StreamFileReader
from arvados._ranges import Range
@@ -223,13 +224,6 @@ class StreamRetryTestMixin(object):
reader = self.reader_for('bar_file')
self.assertEqual(b'bar', self.read_for_test(reader, 3))
- @tutil.skip_sleep
- def test_read_no_default_retry(self):
- with tutil.mock_keep_responses('', 500):
- reader = self.reader_for('user_agreement')
- with self.assertRaises(arvados.errors.KeepReadError):
- self.read_for_test(reader, 10)
-
@tutil.skip_sleep
def test_read_with_instance_retries(self):
with tutil.mock_keep_responses('foo', 500, 200):
diff --git a/sdk/python/tests/test_util.py b/sdk/python/tests/test_util.py
index 4dba9ce3dc..75d4a89e30 100644
--- a/sdk/python/tests/test_util.py
+++ b/sdk/python/tests/test_util.py
@@ -2,10 +2,14 @@
#
# SPDX-License-Identifier: Apache-2.0
+import itertools
import os
+import parameterized
import subprocess
import unittest
+from unittest import mock
+
import arvados
import arvados.util
@@ -54,6 +58,12 @@ class KeysetTestHelper:
self.n += 1
return self.expect[self.n-1][1]
+_SELECT_FAKE_ITEM = {
+ 'uuid': 'zzzzz-zyyyz-zzzzzyyyyywwwww',
+ 'name': 'KeysetListAllTestCase.test_select mock',
+ 'created_at': '2023-08-28T12:34:56.123456Z',
+}
+
class KeysetListAllTestCase(unittest.TestCase):
def test_empty(self):
ks = KeysetTestHelper([[
@@ -163,7 +173,6 @@ class KeysetListAllTestCase(unittest.TestCase):
ls = list(arvados.util.keyset_list_all(ks.fn, filters=[["foo", ">", "bar"]]))
self.assertEqual(ls, [{"created_at": "1", "uuid": "1"}, {"created_at": "2", "uuid": "2"}])
-
def test_onepage_desc(self):
ks = KeysetTestHelper([[
{"limit": 1000, "count": "none", "order": ["created_at desc", "uuid desc"], "filters": []},
@@ -175,3 +184,35 @@ class KeysetListAllTestCase(unittest.TestCase):
ls = list(arvados.util.keyset_list_all(ks.fn, ascending=False))
self.assertEqual(ls, [{"created_at": "2", "uuid": "2"}, {"created_at": "1", "uuid": "1"}])
+
+ @parameterized.parameterized.expand(zip(
+ itertools.cycle(_SELECT_FAKE_ITEM),
+ itertools.chain.from_iterable(
+ itertools.combinations(_SELECT_FAKE_ITEM, count)
+ for count in range(len(_SELECT_FAKE_ITEM) + 1)
+ ),
+ ))
+ def test_select(self, order_key, select):
+ # keyset_list_all must have both uuid and order_key to function.
+ # Test that it selects those fields along with user-specified ones.
+ expect_select = {'uuid', order_key, *select}
+ item = {
+ key: value
+ for key, value in _SELECT_FAKE_ITEM.items()
+ if key in expect_select
+ }
+ list_func = mock.Mock()
+ list_func().execute = mock.Mock(
+ side_effect=[
+ {'items': [item]},
+ {'items': []},
+ {'items': []},
+ ],
+ )
+ list_func.reset_mock()
+ actual = list(arvados.util.keyset_list_all(list_func, order_key, select=list(select)))
+ self.assertEqual(actual, [item])
+ calls = list_func.call_args_list
+ self.assertTrue(len(calls) >= 2, "list_func() not called enough to exhaust items")
+ for args, kwargs in calls:
+ self.assertEqual(set(kwargs.get('select', ())), expect_select)
diff --git a/sdk/python/tests/test_vocabulary.py b/sdk/python/tests/test_vocabulary.py
index aa2e739e20..2f5db3b9d9 100644
--- a/sdk/python/tests/test_vocabulary.py
+++ b/sdk/python/tests/test_vocabulary.py
@@ -4,7 +4,8 @@
import arvados
import unittest
-import mock
+
+from unittest import mock
from arvados import api, vocabulary
diff --git a/sdk/ruby-google-api-client/.gitignore b/sdk/ruby-google-api-client/.gitignore
new file mode 100644
index 0000000000..fb4875a9a6
--- /dev/null
+++ b/sdk/ruby-google-api-client/.gitignore
@@ -0,0 +1,20 @@
+._*
+.DS_Store
+.yardoc
+.bundle
+.rvmrc
+Gemfile.lock
+coverage
+doc
+heckling
+pkg
+specdoc
+wiki
+.google-api.yaml
+*.log
+
+#IntelliJ
+.idea
+*.iml
+atlassian*
+
diff --git a/sdk/ruby-google-api-client/.rspec b/sdk/ruby-google-api-client/.rspec
new file mode 100644
index 0000000000..7438fbe51d
--- /dev/null
+++ b/sdk/ruby-google-api-client/.rspec
@@ -0,0 +1,2 @@
+--colour
+--format documentation
diff --git a/sdk/ruby-google-api-client/.travis.yml b/sdk/ruby-google-api-client/.travis.yml
new file mode 100644
index 0000000000..2a453720d8
--- /dev/null
+++ b/sdk/ruby-google-api-client/.travis.yml
@@ -0,0 +1,23 @@
+language: ruby
+rvm:
+ - 2.2
+ - 2.0.0
+ - 2.1
+ - 1.9.3
+ - rbx-2
+ - jruby
+env:
+ - RAILS_VERSION="~>3.2"
+ - RAILS_VERSION="~>4.0.0"
+ - RAILS_VERSION="~>4.1.0"
+ - RAILS_VERSION="~>4.2.0"
+script: "bundle exec rake spec:all"
+before_install:
+ - sudo apt-get update
+ - sudo apt-get install idn
+notifications:
+ email:
+ recipients:
+ - sbazyl@google.com
+ on_success: change
+ on_failure: change
diff --git a/sdk/ruby-google-api-client/.yardopts b/sdk/ruby-google-api-client/.yardopts
new file mode 100644
index 0000000000..fa8f29d036
--- /dev/null
+++ b/sdk/ruby-google-api-client/.yardopts
@@ -0,0 +1,7 @@
+--markup markdown
+lib/**/*.rb
+ext/**/*.c
+-
+README.md
+CHANGELOG.md
+LICENSE
diff --git a/sdk/ruby-google-api-client/CHANGELOG.md b/sdk/ruby-google-api-client/CHANGELOG.md
new file mode 100644
index 0000000000..34e7dfaa2a
--- /dev/null
+++ b/sdk/ruby-google-api-client/CHANGELOG.md
@@ -0,0 +1,178 @@
+# 0.8.8
+* Do not put CR/LF in http headers
+
+# 0.8.7
+* Lock activesupport version to < 5.0
+
+# 0.8.6
+* Use discovered 'rootUrl' as base URI for services
+* Respect discovered methods with colons in path
+
+# 0.8.5
+* Corrects the regression Rails 4 support in the 0.8.4 release.
+
+# 0.8.4
+* Fixes a file permission issue with the 0.8.3 release
+* Fixes warnings when the library is used
+
+# 0.8.3
+* Adds support for authorization via Application Default Credentials.
+* Adds support for tracking coverage on coveralls.io
+
+# 0.8.2
+* Fixes for file storage and missing cacerts file
+
+# 0.8.1
+* Fix logger in rails
+
+# 0.8.0
+* Refactored credential storage, added support for redis
+* Update gem dependencies
+* Fixed retry logic to allow for auth retries independent of the overall number of retries
+* Added `:force_encoding` option to set body content encoding based on the Content-Type header
+* Batch requests with the service interface now inherit the service's connection
+* `register_discover_document` now returns the API instance
+* Added `:proxy` option to set Faraday's HTTP proxy setting
+* Added `:faraday_options` option to allow passthrough settings to Faraday connection
+* Drop 1.8.x support
+* This will be the last release with 1.9.x support
+
+# 0.7.1
+* Minor fix to update gem dependencies
+
+# 0.7.0
+* Remove CLI
+* Support for automatic retries & backoff. Off by default, enable by setting `retries` on `APIClient`
+* Experimental new interface (see `Google::APIClient::Service`)
+* Fix warnings when using Faraday separately
+* Support Google Compute Engine service accounts
+* Enable gzip compression for responses
+* Upgrade to Faraday 0.9.0. Resolves multiple issues with query parameter encodings.
+* Use bundled root certificates for verifying SSL certificates
+* Rewind media when retrying uploads
+
+# 0.6.4
+* Pin signet version to 0.4.x
+
+# 0.6.3
+
+* Update autoparse to 0.3.3 to fix cases where results aren't correctly parsed.
+* Fix railtie loading for compatibility with rails < 3.0
+* Fix refresh of access token when passing credentials as parameter to execute
+* Fix URI processing in batch requests to allow query parameters
+
+# 0.6.2
+
+* Update signet to 0.4.6 to support server side continuation of postmessage
+ auth flows.
+
+# 0.6.1
+
+* Fix impersonation with service accounts
+
+# 0.6
+
+* Apps strongly encouraged to set :application_name & :application_version when
+ initializing a client
+* JWT/service accounts moved to signet
+* Added helper class for installed app OAuth flows, updated samples & CLI
+* Initial logging support for client
+* Fix PKCS12 loading on windows
+* Allow disabling auto-refresh of OAuth 2 access tokens
+* Compatibility with MultiJson >= 1.0.0 & Rails 3.2.8
+* Fix for body serialization when body doesn't respond to to_json
+* Remove OAuth 1.0 logins from CLI
+
+
+# 0.5.0
+
+* Beta candidate, potential incompatible changes with how requests are processed.
+ * All requests should be made using execute() or execute!()
+ * :api_method in request can no longer be a string
+ * Deprecated ResumableUpload.send_* methods.
+* Reduce memory utilization when uploading large files
+* Automatic refresh of OAuth 2 credentials & retry of request when 401 errors
+ are returned
+* Simplify internal request processing.
+
+# 0.4.7
+
+* Added the ability to convert client secrets to an authorization object
+
+# 0.4.6
+
+* Backwards compatibility for MultiJson
+
+# 0.4.5
+
+* Updated Launchy dependency
+* Updated Faraday dependency
+* Updated Addressable dependency
+* Updated Autoparse dependency
+* Removed Sinatra development dependency
+
+# 0.4.4
+
+* Added batch execution
+* Added service accounts
+* Can now supply authorization on a per-request basis.
+
+# 0.4.3
+
+* Added media upload capabilities
+* Support serializing OAuth credentials to client_secrets.json
+* Fixed OS name/version string on JRuby
+
+# 0.4.2
+
+* Fixed incompatibility with Ruby 1.8.7
+
+# 0.4.1
+
+* Fixed ancestor checking issue when assigning Autoparse identifiers
+* Renamed discovery methods to avoid collisions with some APIs
+* Updated autoparse dependency to avoid JSON bug
+
+# 0.4.0
+
+* Replaced httpadapter gem dependency with faraday
+* Replaced json gem dependency with multi_json
+* Fixed /dev/null issues on Windows
+* Repeated parameters now work
+
+# 0.3.0
+
+* Updated to use v1 of the discovery API
+* Updated to use httpadapter 1.0.0
+* Added OAuth 2 support to the command line tool
+* Renamed some switches in the command line tool
+* Added additional configuration capabilities
+* Fixed a few deprecation warnings from dependencies
+* Added gemspec to source control
+
+# 0.2.0
+
+* Updated to use v1 of the discovery API
+* Updated to use httpadapter 1.0.0
+* Added OAuth 2 support to the command line tool
+* Renamed some switches in the command line tool
+* Added additional configuration capabilities
+
+# 0.1.3
+
+* Added support for manual overrides of the discovery URI
+* Added support for manual overrides of the API base
+* Added support for xoauth_requestor_id
+
+# 0.1.2
+
+* Added support for two-legged OAuth
+* Moved some development dependencies into runtime
+
+# 0.1.1
+
+* Substantial improvements to the command line interface
+
+# 0.1.0
+
+* Initial release
diff --git a/sdk/ruby-google-api-client/CONTRIBUTING.md b/sdk/ruby-google-api-client/CONTRIBUTING.md
new file mode 100644
index 0000000000..1e65911f85
--- /dev/null
+++ b/sdk/ruby-google-api-client/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to become a contributor and submit your own code
+
+## Contributor License Agreements
+
+We'd love to accept your sample apps and patches! Before we can take them, we
+have to jump a couple of legal hurdles.
+
+Please fill out either the individual or corporate Contributor License Agreement
+(CLA).
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an
+    [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a
+    [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
+
+Follow either of the two links above to access the appropriate CLA and
+instructions for how to sign and return it. Once we receive it, we'll be able to
+accept your pull requests.
+
+## Contributing A Patch
+
+1. Submit an issue describing your proposed change to the repo in question.
+1. The repo owner will respond to your issue promptly.
+1. If your proposed change is accepted, and you haven't already done so, sign a
+ Contributor License Agreement (see details above).
+1. Fork the desired repo, develop and test your code changes.
+1. Ensure that your code is clear and comprehensible.
+1. Ensure that your code has an appropriate set of unit tests which all pass.
+1. Submit a pull request.
+
diff --git a/sdk/ruby-google-api-client/Gemfile b/sdk/ruby-google-api-client/Gemfile
new file mode 100644
index 0000000000..9e6d43ad8e
--- /dev/null
+++ b/sdk/ruby-google-api-client/Gemfile
@@ -0,0 +1,9 @@
+source 'https://rubygems.org'
+
+gemspec
+
+gem 'jruby-openssl', :platforms => :jruby
+
+if ENV['RAILS_VERSION']
+ gem 'rails', ENV['RAILS_VERSION']
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/LICENSE b/sdk/ruby-google-api-client/LICENSE
new file mode 100644
index 0000000000..ef51da2b0e
--- /dev/null
+++ b/sdk/ruby-google-api-client/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/sdk/ruby-google-api-client/README.md b/sdk/ruby-google-api-client/README.md
new file mode 100644
index 0000000000..e0b95adfbe
--- /dev/null
+++ b/sdk/ruby-google-api-client/README.md
@@ -0,0 +1,7 @@
+# Arvados Google API Client
+
+This is a fork of the google-api-client gem, based on https://github.com/google/google-api-ruby-client version 0.8.6.
+
+It adds compatibility fixes for newer versions of dependencies (Ruby, faraday, etc.) while avoiding the breaking API changes that have been made in the upstream project.
+
+It is entirely focused on the use cases needed by the Arvados Ruby SDK and is not intended or expected to work elsewhere.
diff --git a/sdk/ruby-google-api-client/Rakefile b/sdk/ruby-google-api-client/Rakefile
new file mode 100644
index 0000000000..dca3b09038
--- /dev/null
+++ b/sdk/ruby-google-api-client/Rakefile
@@ -0,0 +1,41 @@
+# -*- ruby -*-
+lib_dir = File.expand_path('../lib', __FILE__)
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+
+require 'bundler/gem_tasks'
+require 'rubygems'
+require 'rake'
+
+require File.join(File.dirname(__FILE__), 'lib/google/api_client', 'version')
+
+PKG_DISPLAY_NAME = 'Google API Client'
+PKG_NAME = PKG_DISPLAY_NAME.downcase.gsub(/\s/, '-')
+PKG_VERSION = Google::APIClient::VERSION::STRING
+PKG_FILE_NAME = "#{PKG_NAME}-#{PKG_VERSION}"
+PKG_HOMEPAGE = 'https://github.com/google/google-api-ruby-client'
+
+RELEASE_NAME = "REL #{PKG_VERSION}"
+
+PKG_AUTHOR = ["Bob Aman", "Steve Bazyl"]
+PKG_AUTHOR_EMAIL = "sbazyl@google.com"
+PKG_SUMMARY = 'Package Summary'
+PKG_DESCRIPTION = <<-TEXT
+The Google API Ruby Client makes it trivial to discover and access supported
+APIs.
+TEXT
+
+list = FileList[
+ 'lib/**/*', 'spec/**/*', 'vendor/**/*',
+ 'tasks/**/*', 'website/**/*',
+ '[A-Z]*', 'Rakefile'
+].exclude(/[_\.]git$/)
+(open(".gitignore") { |file| file.read }).split("\n").each do |pattern|
+ list.exclude(pattern)
+end
+PKG_FILES = list
+
+task :default => 'spec'
+
+WINDOWS = (RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/) rescue false
+SUDO = WINDOWS ? '' : ('sudo' unless ENV['SUDOLESS'])
diff --git a/sdk/ruby-google-api-client/arvados-google-api-client.gemspec b/sdk/ruby-google-api-client/arvados-google-api-client.gemspec
new file mode 100644
index 0000000000..123180ae1c
--- /dev/null
+++ b/sdk/ruby-google-api-client/arvados-google-api-client.gemspec
@@ -0,0 +1,51 @@
+# -*- encoding: utf-8 -*-
+require File.join(File.dirname(__FILE__), 'lib/google/api_client', 'version')
+
+Gem::Specification.new do |s|
+ s.name = "arvados-google-api-client"
+ s.version = Google::APIClient::VERSION::STRING
+
+ s.required_ruby_version = '>= 2.7.0'
+ s.required_rubygems_version = ">= 1.3.5"
+ s.require_paths = ["lib"]
+ s.authors = ["Bob Aman", "Steven Bazyl"]
+ s.license = "Apache-2.0"
+ s.description = "Fork of google-api-client used by Ruby-based Arvados components."
+ s.email = "dev@arvados.org"
+ s.extra_rdoc_files = ["README.md"]
+ s.files = %w(arvados-google-api-client.gemspec Rakefile LICENSE CHANGELOG.md README.md Gemfile)
+ s.files += Dir.glob("lib/**/*.rb")
+ s.files += Dir.glob("lib/cacerts.pem")
+ s.files += Dir.glob("spec/**/*.{rb,opts}")
+ s.files += Dir.glob("vendor/**/*.rb")
+ s.files += Dir.glob("tasks/**/*")
+ s.files += Dir.glob("website/**/*")
+ s.homepage = "https://github.com/arvados/arvados/tree/main/sdk/ruby-google-api-client"
+ s.rdoc_options = ["--main", "README.md"]
+ s.summary = "Fork of google-api-client used by Ruby-based Arvados components."
+
+ s.add_runtime_dependency 'addressable', '~> 2.3'
+ s.add_runtime_dependency 'signet', '~> 0.16.0'
+ # faraday requires Ruby 3.0 starting with 2.9.0. If you install this gem
+ # on Ruby 2.7, the dependency resolver asks you to resolve the conflict
+ # manually. Instead of teaching all our tooling to do that, we prefer to
+ # require the latest version that supports Ruby 2.7 here. This requirement
+ # can be relaxed to '~> 2.0' when we drop support for Ruby 2.7.
+ s.add_runtime_dependency 'faraday', '~> 2.8.0'
+ s.add_runtime_dependency 'faraday-multipart', '~> 1.0'
+ s.add_runtime_dependency 'faraday-gzip', '~> 2.0'
+ s.add_runtime_dependency 'googleauth', '~> 1.0'
+ s.add_runtime_dependency 'multi_json', '~> 1.10'
+ s.add_runtime_dependency 'autoparse', '~> 0.3'
+ s.add_runtime_dependency 'extlib', '~> 0.9'
+ s.add_runtime_dependency 'launchy', '~> 2.4'
+ s.add_runtime_dependency 'retriable', '~> 1.4'
+ s.add_runtime_dependency 'activesupport', '>= 3.2', '< 8.0'
+
+ s.add_development_dependency 'rake', '~> 10.0'
+ s.add_development_dependency 'yard', '~> 0.8'
+ s.add_development_dependency 'rspec', '~> 3.1'
+ s.add_development_dependency 'kramdown', '~> 1.5'
+ s.add_development_dependency 'simplecov', '~> 0.9.2'
+ s.add_development_dependency 'coveralls', '~> 0.7.11'
+end
diff --git a/sdk/ruby-google-api-client/lib/cacerts.pem b/sdk/ruby-google-api-client/lib/cacerts.pem
new file mode 100644
index 0000000000..70990f1f82
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/cacerts.pem
@@ -0,0 +1,2183 @@
+# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
+# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
+# Label: "GTE CyberTrust Global Root"
+# Serial: 421
+# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db
+# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74
+# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36
+-----BEGIN CERTIFICATE-----
+MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD
+VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv
+bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv
+b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV
+UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
+cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
+b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH
+iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS
+r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4
+04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r
+GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9
+3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P
+lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
+-----END CERTIFICATE-----
+
+# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Label: "Thawte Server CA"
+# Serial: 1
+# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d
+# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c
+# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
+VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm
+MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx
+MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT
+DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3
+dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl
+cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3
+DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD
+gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91
+yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX
+L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj
+EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG
+7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e
+QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ
+qdq5snUb9kLy78fyGPmJvKP/iiMucEc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division
+# Label: "Thawte Premium Server CA"
+# Serial: 1
+# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a
+# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a
+# SHA256 Fingerprint: ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72
+-----BEGIN CERTIFICATE-----
+MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx
+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
+VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy
+dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t
+MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB
+MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG
+A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp
+b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl
+cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv
+bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE
+VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ
+ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR
+uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG
+9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI
+hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM
+pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Equifax OU=Equifax Secure Certificate Authority
+# Subject: O=Equifax OU=Equifax Secure Certificate Authority
+# Label: "Equifax Secure CA"
+# Serial: 903804111
+# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4
+# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a
+# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
+UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy
+dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1
+MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx
+dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B
+AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f
+BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A
+cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC
+AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ
+MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm
+aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw
+ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj
+IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF
+MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
+A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y
+7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh
+1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Label: "Verisign Class 3 Public Primary Certification Authority"
+# Serial: 149843929435818692848040365716851702463
+# MD5 Fingerprint: 10:fc:63:5d:f6:26:3e:0d:f3:25:be:5f:79:cd:67:67
+# SHA1 Fingerprint: 74:2c:31:92:e6:07:e4:24:eb:45:49:54:2b:e1:bb:c5:3e:61:74:e2
+# SHA256 Fingerprint: e7:68:56:34:ef:ac:f6:9a:ce:93:9a:6b:25:5b:7b:4f:ab:ef:42:93:5b:50:a2:65:ac:b5:cb:60:27:e4:4e:70
+-----BEGIN CERTIFICATE-----
+MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
+cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
+MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
+BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
+ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE
+BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is
+I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G
+CSqGSIb3DQEBAgUAA4GBALtMEivPLCYATxQT3ab7/AoRhIzzKBxnki98tsX63/Do
+lbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59AhWM1pF+NEHJwZRDmJXNyc
+AA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2OmufTqj/ZA1k
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network
+# Label: "Verisign Class 3 Public Primary Certification Authority - G2"
+# Serial: 167285380242319648451154478808036881606
+# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9
+# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f
+# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ
+BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh
+c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy
+MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp
+emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X
+DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw
+FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg
+UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo
+YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5
+MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4
+pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0
+13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID
+AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk
+U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i
+F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY
+oJ2daZH9
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority
+# Label: "ValiCert Class 1 VA"
+# Serial: 1
+# MD5 Fingerprint: 65:58:ab:15:ad:57:6c:1e:a8:a7:b5:69:ac:bf:ff:eb
+# SHA1 Fingerprint: e5:df:74:3c:b6:01:c4:9b:98:43:dc:ab:8c:e8:6a:81:10:9f:e4:8e
+# SHA256 Fingerprint: f4:c1:49:55:1a:30:13:a3:5b:c7:bf:fe:17:a7:f3:44:9b:c1:ab:5b:5a:0a:e7:4b:06:c2:3b:90:00:4c:01:04
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy
+NTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y
+LqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+
+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y
+TfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0
+LBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW
+I8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw
+nXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority
+# Label: "ValiCert Class 2 VA"
+# Serial: 1
+# MD5 Fingerprint: a9:23:75:9b:ba:49:36:6e:31:c2:db:f2:e7:66:ba:87
+# SHA1 Fingerprint: 31:7a:2a:d0:7f:2b:33:5e:f5:a1:c3:4e:4b:57:e8:b7:d8:f1:fc:a6
+# SHA256 Fingerprint: 58:d0:17:27:9c:d4:dc:63:ab:dd:b1:96:a6:c9:90:6c:30:c4:e0:87:83:ea:e8:c1:60:99:54:d6:93:55:59:6b
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy
+NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY
+dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9
+WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS
+v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v
+UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu
+IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC
+W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd
+-----END CERTIFICATE-----
+
+# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority
+# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority
+# Label: "RSA Root Certificate 1"
+# Serial: 1
+# MD5 Fingerprint: a2:6f:53:b7:ee:40:db:4a:68:e7:fa:18:d9:10:4b:72
+# SHA1 Fingerprint: 69:bd:8c:f4:9c:d3:00:fb:59:2e:17:93:ca:55:6a:f3:ec:aa:35:fb
+# SHA256 Fingerprint: bc:23:f9:8a:31:3c:b9:2d:e3:bb:fc:3a:5a:9f:44:61:ac:39:49:4c:4a:e1:5a:9e:9d:f1:31:e9:9b:73:01:9a
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
+# Serial: 206684696279472310254277870180966723415
+# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
+# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
+# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 4 Public Primary Certification Authority - G3"
+# Serial: 314531972711909413743075096039378935511
+# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df
+# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d
+# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1
+GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ
++mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd
+U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm
+NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY
+ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/
+ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1
+CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq
+g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm
+fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c
+2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/
+bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Secure Server CA"
+# Serial: 927650371
+# MD5 Fingerprint: df:f2:80:73:cc:f1:e6:61:73:fc:f5:42:e9:c5:7c:ee
+# SHA1 Fingerprint: 99:a6:9b:e6:1a:fe:88:6b:4d:2b:82:00:7c:b8:54:fc:31:7e:15:39
+# SHA256 Fingerprint: 62:f2:40:27:8c:56:4c:4d:d8:bf:7d:9d:4f:6f:36:6e:a8:94:d2:2f:5f:34:d9:89:a9:83:ac:ec:2f:ff:ed:50
+-----BEGIN CERTIFICATE-----
+MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC
+VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u
+ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc
+KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u
+ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1
+MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE
+ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j
+b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF
+bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg
+U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA
+A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/
+I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3
+wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC
+AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb
+oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5
+BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p
+dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk
+MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp
+b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu
+dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0
+MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi
+E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa
+MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI
+hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN
+95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd
+2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946059622
+# MD5 Fingerprint: ba:21:ea:20:d6:dd:db:8f:c1:57:8b:40:ad:a1:fc:fc
+# SHA1 Fingerprint: 80:1d:62:d0:7b:44:9d:5c:5c:03:5c:98:ea:61:fa:44:3c:2a:58:fe
+# SHA256 Fingerprint: d1:c3:39:ea:27:84:eb:87:0f:93:4f:c5:63:4e:4a:a9:ad:55:05:01:64:01:f2:64:65:d3:7a:57:46:63:35:9f
+-----BEGIN CERTIFICATE-----
+MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy
+MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA
+vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G
+CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA
+WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo
+oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ
+h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18
+f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN
+B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy
+vUxFnmG6v4SBkgPR0ml8xQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.
+# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.
+# Label: "Equifax Secure Global eBusiness CA"
+# Serial: 1
+# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc
+# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45
+# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07
+-----BEGIN CERTIFICATE-----
+MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT
+ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw
+MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj
+dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l
+c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC
+UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc
+58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/
+o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr
+aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA
+A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA
+Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv
+8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV
+-----END CERTIFICATE-----
+
+# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.
+# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.
+# Label: "Equifax Secure eBusiness CA 1"
+# Serial: 4
+# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d
+# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41
+# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73
+-----BEGIN CERTIFICATE-----
+MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT
+ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw
+MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j
+LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ
+KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo
+RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu
+WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw
+Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD
+AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK
+eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM
+zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+
+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN
+/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=Equifax Secure OU=Equifax Secure eBusiness CA-2
+# Subject: O=Equifax Secure OU=Equifax Secure eBusiness CA-2
+# Label: "Equifax Secure eBusiness CA 2"
+# Serial: 930140085
+# MD5 Fingerprint: aa:bf:bf:64:97:da:98:1d:6f:c6:08:3a:95:70:33:ca
+# SHA1 Fingerprint: 39:4f:f6:85:0b:06:be:52:e5:18:56:cc:10:e1:80:e8:82:b3:85:cc
+# SHA256 Fingerprint: 2f:27:4e:48:ab:a4:ac:7b:76:59:33:10:17:75:50:6d:c3:0e:e3:8e:f6:ac:d5:c0:49:32:cf:e0:41:23:42:20
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
+UzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj
+dXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0
+NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD
+VQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B
+AQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G
+vxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/
+BPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C
+AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl
+IGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw
+NjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq
+y/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF
+MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
+A4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy
+0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1
+E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Low-Value Services Root"
+# Serial: 1
+# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc
+# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d
+# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7
+-----BEGIN CERTIFICATE-----
+MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw
+MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD
+VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul
+CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n
+tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl
+dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch
+PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC
++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O
+BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk
+ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB
+IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X
+7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz
+43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl
+pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA
+WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Label: "AddTrust External Root"
+# Serial: 1
+# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
+# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
+# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Public Services Root"
+# Serial: 1
+# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f
+# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5
+# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx
+MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB
+ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV
+BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV
+6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX
+GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP
+dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH
+1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF
+62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW
+BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL
+MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU
+cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv
+b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6
+IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/
+iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao
+GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh
+4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm
+XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network
+# Label: "AddTrust Qualified Certificates Root"
+# Serial: 1
+# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb
+# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf
+# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16
+-----BEGIN CERTIFICATE-----
+MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1
+MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK
+EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh
+BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq
+xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G
+87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i
+2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U
+WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1
+0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G
+A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr
+pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL
+ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm
+aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv
+hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm
+hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X
+dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3
+P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y
+iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no
+xqE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Label: "GeoTrust Global CA"
+# Serial: 144470
+# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
+# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
+# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Global CA 2"
+# Serial: 1
+# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9
+# SHA1 Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d
+# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85
+-----BEGIN CERTIFICATE-----
+MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs
+IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg
+R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A
+PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8
+Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL
+TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL
+5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7
+S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe
+2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
+FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap
+EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td
+EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv
+/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN
+A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0
+abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF
+I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz
+4iIprn2DQKi6bA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA"
+# Serial: 1
+# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
+# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
+# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA 2"
+# Serial: 1
+# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
+# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
+# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+# Issuer: CN=America Online Root Certification Authority 1 O=America Online Inc.
+# Subject: CN=America Online Root Certification Authority 1 O=America Online Inc.
+# Label: "America Online Root Certification Authority 1"
+# Serial: 1
+# MD5 Fingerprint: 14:f1:08:ad:9d:fa:64:e2:89:e7:1c:cf:a8:ad:7d:5e
+# SHA1 Fingerprint: 39:21:c1:15:c1:5d:0e:ca:5c:cb:5b:c4:f0:7d:21:d8:05:0b:56:6a
+# SHA256 Fingerprint: 77:40:73:12:c6:3a:15:3d:5b:c0:0b:4e:51:75:9c:df:da:c2:37:dc:2a:33:b6:79:46:e9:8e:9b:fa:68:0a:e3
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP
+bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2
+MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft
+ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk
+hsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym
+1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW
+OqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb
+2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko
+O3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU
+AK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
+BQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF
+Zu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb
+LjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir
+oQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C
+MMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds
+sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7
+-----END CERTIFICATE-----
+
+# Issuer: CN=America Online Root Certification Authority 2 O=America Online Inc.
+# Subject: CN=America Online Root Certification Authority 2 O=America Online Inc.
+# Label: "America Online Root Certification Authority 2"
+# Serial: 1
+# MD5 Fingerprint: d6:ed:3c:ca:e2:66:0f:af:10:43:0d:77:9b:04:09:bf
+# SHA1 Fingerprint: 85:b5:ff:67:9b:0c:79:96:1f:c8:6e:44:22:00:46:13:db:17:92:84
+# SHA256 Fingerprint: 7d:3b:46:5a:60:14:e5:26:c0:af:fc:ee:21:27:d2:31:17:27:ad:81:1c:26:84:2d:00:6a:f3:73:06:cc:80:bd
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP
+bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2
+MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft
+ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC
+206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci
+KtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2
+JxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9
+BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e
+Xz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B
+PeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67
+Xnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq
+Z8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ
+o2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3
++L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj
+YzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj
+FNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn
+xPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2
+LHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc
+obGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8
+CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe
+IjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA
+DjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F
+AjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX
+Om/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb
+AZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl
+Zvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw
+RY8mkaKO/qk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Certificate Services O=Comodo CA Limited
+# Subject: CN=Secure Certificate Services O=Comodo CA Limited
+# Label: "Comodo Secure Services root"
+# Serial: 1
+# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd
+# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1
+# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8
+-----BEGIN CERTIFICATE-----
+MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp
+ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow
+fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV
+BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM
+cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S
+HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996
+CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk
+3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz
+6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV
+HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
+EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv
+Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw
+Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww
+DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0
+5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj
+Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI
+gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ
+aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl
+izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trusted Certificate Services O=Comodo CA Limited
+# Subject: CN=Trusted Certificate Services O=Comodo CA Limited
+# Label: "Comodo Trusted Services root"
+# Serial: 1
+# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27
+# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd
+# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0
+aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla
+MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO
+BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD
+VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW
+fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt
+TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL
+fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW
+1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7
+kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G
+A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v
+ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo
+dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu
+Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/
+HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32
+pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS
+jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+
+xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn
+dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi
+-----END CERTIFICATE-----
+
+# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
+# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
+# Label: "UTN DATACorp SGC Root CA"
+# Serial: 91374294542884689855167577680241077609
+# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06
+# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4
+# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48
+-----BEGIN CERTIFICATE-----
+MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
+kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
+VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
+dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
+D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
+4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
+lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
+bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
+MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
+LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
+BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
+AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
+Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
+j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
+KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
+2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
+mfnGV/TJVTl4uix5yaaIK/QI
+-----END CERTIFICATE-----
+
+# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
+# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
+# Label: "UTN USERFirst Hardware Root CA"
+# Serial: 91374294542884704022267039221184531197
+# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39
+# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7
+# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37
+-----BEGIN CERTIFICATE-----
+MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
+d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
+cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
+0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
+M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
+oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
+DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
+dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
+BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
+//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
+CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
+CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
+KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Label: "StartCom Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16
+# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f
+# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Label: "Network Solutions Certificate Authority"
+# Serial: 116697915152937497490437556386812487904
+# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA
+# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA
+# Label: "TC TrustCenter Class 2 CA II"
+# Serial: 941389028203453866782103406992443
+# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23
+# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e
+# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4
+-----BEGIN CERTIFICATE-----
+MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0
+Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1
+OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i
+SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc
+VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf
+tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg
+uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J
+XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK
+8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99
+5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3
+kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy
+dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6
+Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz
+JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290
+Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u
+TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS
+GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt
+ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8
+au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV
+hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI
+dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA
+# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA
+# Label: "TC TrustCenter Class 3 CA II"
+# Serial: 1506523511417715638772220530020799
+# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e
+# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5
+# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e
+-----BEGIN CERTIFICATE-----
+MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0
+Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1
+OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i
+SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc
+VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW
+Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q
+Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2
+1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq
+ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1
+Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX
+XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy
+dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6
+Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz
+JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290
+Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u
+TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN
+irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8
+TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6
+g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB
+95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj
+S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Label: "TC TrustCenter Universal CA I"
+# Serial: 601024842042189035295619584734726
+# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c
+# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3
+# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV
+BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1
+c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx
+MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg
+R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD
+VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR
+JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T
+fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu
+jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z
+wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ
+fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD
+VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G
+CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1
+7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn
+8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs
+ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT
+ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/
+2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY
+-----END CERTIFICATE-----
+
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority
+# Label: "Verisign Class 3 Public Primary Certification Authority"
+# Serial: 80507572722862485515306429940691309246
+# MD5 Fingerprint: ef:5a:f1:33:ef:f1:cd:bb:51:02:ee:12:14:4b:96:c4
+# SHA1 Fingerprint: a1:db:63:93:91:6f:17:e4:18:55:09:40:04:15:c7:02:40:b0:ae:6b
+# SHA256 Fingerprint: a4:b6:b3:99:6f:c2:f3:06:b3:fd:86:81:bd:63:41:3d:8c:50:09:cc:4f:a3:29:c2:cc:f0:e2:fa:1b:14:03:05
+-----BEGIN CERTIFICATE-----
+MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz
+cyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2
+MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV
+BAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN
+ADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE
+BarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is
+I19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G
+CSqGSIb3DQEBBQUAA4GBABByUqkFFBkyCEHwxWsKzH4PIRnN5GfcX6kb5sroc50i
+2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWXbj9T/UWZYB2oK0z5XqcJ
+2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/D/xwzoiQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Subject: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA
+# Label: "TC TrustCenter Universal CA III"
+# Serial: 2010889993983507346460533407902964
+# MD5 Fingerprint: 9f:dd:db:ab:ff:8e:ff:45:21:5f:f0:6c:9d:8f:fe:2b
+# SHA1 Fingerprint: 96:56:cd:7b:57:96:98:95:d0:e1:41:46:68:06:fb:b8:c6:11:06:87
+# SHA256 Fingerprint: 30:9b:4a:87:f6:ca:56:c9:31:69:aa:a9:9c:6d:98:88:54:d7:89:2b:d5:43:7e:2d:07:b2:9c:be:da:55:d3:5d
+-----BEGIN CERTIFICATE-----
+MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV
+BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1
+c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAeFw0wOTA5MDkwODE1MjdaFw0yOTEy
+MzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNUQyBUcnVzdENlbnRl
+ciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0ExKDAm
+BgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF
+5+cvAqBNLaT6hdqbJYUtQCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYv
+DIRlzg9uwliT6CwLOunBjvvya8o84pxOjuT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8v
+zArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+EutCHnNaYlAJ/Uqwa1D7KRT
+yGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1M4BDj5yj
+dipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBh
+MB8GA1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI
+4jANBgkqhkiG9w0BAQUFAAOCAQEAg8ev6n9NCjw5sWi+e22JLumzCecYV42Fmhfz
+dkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+KGwWaODIl0YgoGhnYIg5IFHY
+aAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhKBgePxLcHsU0G
+DeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV
+CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPH
+LQNjO9Po5KIqwoIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
+# Label: "StartCom Certification Authority"
+# Serial: 45
+# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16
+# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0
+# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11
+-----BEGIN CERTIFICATE-----
+MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC
+ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w
+ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk
+aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0
+YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg
+c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93
+d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG
+CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1
+dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF
+wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS
+Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst
+0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc
+pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl
+CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF
+P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK
+1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm
+KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
+JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ
+8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm
+fyWl8kgAwKQB2j8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd.
+# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd.
+# Label: "StartCom Certification Authority G2"
+# Serial: 59
+# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64
+# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17
+# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95
+-----BEGIN CERTIFICATE-----
+MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1
+OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG
+A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ
+JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD
+vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo
+D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/
+Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW
+RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK
+HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN
+nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM
+0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i
+UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9
+Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg
+TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL
+BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
+2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX
+UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl
+6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK
+9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ
+HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY
+XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l
+IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo
+hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr
+so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI
+-----END CERTIFICATE-----
diff --git a/sdk/ruby-google-api-client/lib/compat/multi_json.rb b/sdk/ruby-google-api-client/lib/compat/multi_json.rb
new file mode 100644
index 0000000000..3974f084b2
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/compat/multi_json.rb
@@ -0,0 +1,19 @@
+require 'multi_json'
+
+if !MultiJson.respond_to?(:load) || [
+ Kernel,
+ defined?(ActiveSupport::Dependencies::Loadable) && ActiveSupport::Dependencies::Loadable
+].compact.include?(MultiJson.method(:load).owner)
+ module MultiJson
+ class <
+ # :two_legged_oauth_1
+ # :oauth_1
+ # :oauth_2
+ # :google_app_default
+ #
+ # @option options [Boolean] :auto_refresh_token (true)
+ # The setting that controls whether or not the api client attempts to
+ # refresh authorization when a 401 is hit in #execute. If the token does
+ # not support it, this option is ignored.
+ # @option options [String] :application_name
+ # The name of the application using the client.
+ # @option options [String | Array | nil] :scope
+ # The scope(s) used when using google application default credentials
+ # @option options [String] :application_version
+ # The version number of the application using the client.
+ # @option options [String] :user_agent
+ # ("{app_name} google-api-ruby-client/{version} {os_name}/{os_version}")
+ # The user agent used by the client. Most developers will want to
+ # leave this value alone and use the `:application_name` option instead.
+ # @option options [String] :host ("www.googleapis.com")
+ # The API hostname used by the client. This rarely needs to be changed.
+ # @option options [String] :port (443)
+ # The port number used by the client. This rarely needs to be changed.
+ # @option options [String] :discovery_path ("/discovery/v1")
+ # The discovery base path. This rarely needs to be changed.
+ # @option options [String] :ca_file
+ # Optional set of root certificates to use when validating SSL connections.
+ # By default, a bundled set of trusted roots will be used.
+  # @option options [Boolean] :force_encoding
+ # Experimental option. True if response body should be force encoded into the charset
+ # specified in the Content-Type header. Mostly intended for compressed content.
+  # @option options [Hash] :faraday_options
+ # Pass through of options to set on the Faraday connection
+ def initialize(options={})
+ logger.debug { "#{self.class} - Initializing client with options #{options}" }
+
+ # Normalize key to String to allow indifferent access.
+ options = options.inject({}) do |accu, (key, value)|
+ accu[key.to_sym] = value
+ accu
+ end
+ # Almost all API usage will have a host of 'www.googleapis.com'.
+ self.host = options[:host] || 'www.googleapis.com'
+ self.port = options[:port] || 443
+ self.discovery_path = options[:discovery_path] || '/discovery/v1'
+
+ # Most developers will want to leave this value alone and use the
+ # application_name option.
+ if options[:application_name]
+ app_name = options[:application_name]
+ app_version = options[:application_version]
+ application_string = "#{app_name}/#{app_version || '0.0.0'}"
+ else
+ logger.warn { "#{self.class} - Please provide :application_name and :application_version when initializing the client" }
+ end
+
+ proxy = options[:proxy] || Object::ENV["http_proxy"]
+
+ self.user_agent = options[:user_agent] || (
+ "#{application_string} " +
+ "google-api-ruby-client/#{Google::APIClient::VERSION::STRING} #{ENV::OS_VERSION}".strip + " (gzip)"
+ ).strip
+ # The writer method understands a few Symbols and will generate useful
+ # default authentication mechanisms.
+ self.authorization =
+ options.key?(:authorization) ? options[:authorization] : :oauth_2
+ if !options['scope'].nil? and self.authorization.respond_to?(:scope=)
+ self.authorization.scope = options['scope']
+ end
+ self.auto_refresh_token = options.fetch(:auto_refresh_token) { true }
+ self.key = options[:key]
+ self.user_ip = options[:user_ip]
+ self.retries = options.fetch(:retries) { 0 }
+ self.expired_auth_retry = options.fetch(:expired_auth_retry) { true }
+ @discovery_uris = {}
+ @discovery_documents = {}
+ @discovered_apis = {}
+ ca_file = options[:ca_file] || File.expand_path('../../cacerts.pem', __FILE__)
+ self.connection = Faraday.new do |faraday|
+ faraday.request :gzip
+ faraday.response :charset if options[:force_encoding]
+ faraday.options.params_encoder = Faraday::FlatParamsEncoder
+ faraday.ssl.ca_file = ca_file
+ faraday.ssl.verify = true
+ if faraday.respond_to?(:proxy=)
+ # faraday >= 0.6.2
+ faraday.proxy = proxy
+ else
+ # older versions of faraday
+ faraday.proxy proxy
+ end
+ faraday.adapter Faraday.default_adapter
+ if options[:faraday_option].is_a?(Hash)
+ options[:faraday_option].each_pair do |option, value|
+ faraday.options.send("#{option}=", value)
+ end
+ end
+ end
+ return self
+ end
+
+ ##
+ # Returns the authorization mechanism used by the client.
+ #
+ # @return [#generate_authenticated_request] The authorization mechanism.
+ attr_reader :authorization
+
+ ##
+ # Sets the authorization mechanism used by the client.
+ #
+ # @param [#generate_authenticated_request] new_authorization
+ # The new authorization mechanism.
+ def authorization=(new_authorization)
+ case new_authorization
+ when :oauth_1, :oauth
+ require 'signet/oauth_1/client'
+ # NOTE: Do not rely on this default value, as it may change
+ new_authorization = Signet::OAuth1::Client.new(
+ :temporary_credential_uri =>
+ 'https://www.google.com/accounts/OAuthGetRequestToken',
+ :authorization_uri =>
+ 'https://www.google.com/accounts/OAuthAuthorizeToken',
+ :token_credential_uri =>
+ 'https://www.google.com/accounts/OAuthGetAccessToken',
+ :client_credential_key => 'anonymous',
+ :client_credential_secret => 'anonymous'
+ )
+ when :two_legged_oauth_1, :two_legged_oauth
+ require 'signet/oauth_1/client'
+ # NOTE: Do not rely on this default value, as it may change
+ new_authorization = Signet::OAuth1::Client.new(
+ :client_credential_key => nil,
+ :client_credential_secret => nil,
+ :two_legged => true
+ )
+ when :google_app_default
+ require 'googleauth'
+ new_authorization = Google::Auth.get_application_default
+
+ when :oauth_2
+ require 'signet/oauth_2/client'
+ # NOTE: Do not rely on this default value, as it may change
+ new_authorization = Signet::OAuth2::Client.new(
+ :authorization_uri =>
+ 'https://accounts.google.com/o/oauth2/auth',
+ :token_credential_uri =>
+ 'https://accounts.google.com/o/oauth2/token'
+ )
+ when nil
+ # No authorization mechanism
+ else
+ if !new_authorization.respond_to?(:generate_authenticated_request)
+ raise TypeError,
+ 'Expected authorization mechanism to respond to ' +
+ '#generate_authenticated_request.'
+ end
+ end
+ @authorization = new_authorization
+ return @authorization
+ end
+
+ ##
+ # Default Faraday/HTTP connection.
+ #
+ # @return [Faraday::Connection]
+ attr_accessor :connection
+
+ ##
+ # The setting that controls whether or not the api client attempts to
+ # refresh authorization when a 401 is hit in #execute.
+ #
+ # @return [Boolean]
+ attr_accessor :auto_refresh_token
+
+ ##
+ # The application's API key issued by the API console.
+ #
+ # @return [String] The API key.
+ attr_accessor :key
+
+ ##
+ # The IP address of the user this request is being performed on behalf of.
+ #
+ # @return [String] The user's IP address.
+ attr_accessor :user_ip
+
+ ##
+ # The user agent used by the client.
+ #
+ # @return [String]
+ # The user agent string used in the User-Agent header.
+ attr_accessor :user_agent
+
+ ##
+ # The API hostname used by the client.
+ #
+ # @return [String]
+ # The API hostname. Should almost always be 'www.googleapis.com'.
+ attr_accessor :host
+
+ ##
+ # The port number used by the client.
+ #
+ # @return [String]
+ # The port number. Should almost always be 443.
+ attr_accessor :port
+
+ ##
+ # The base path used by the client for discovery.
+ #
+ # @return [String]
+ # The base path. Should almost always be '/discovery/v1'.
+ attr_accessor :discovery_path
+
+ ##
+ # Number of times to retry on recoverable errors
+ #
+ # @return [FixNum]
+ # Number of retries
+ attr_accessor :retries
+
+ ##
+ # Whether or not an expired auth token should be re-acquired
+ # (and the operation retried) regardless of retries setting
+ # @return [Boolean]
+ # Auto retry on auth expiry
+ attr_accessor :expired_auth_retry
+
+ ##
+ # Returns the URI for the directory document.
+ #
+ # @return [Addressable::URI] The URI of the directory document.
+ def directory_uri
+ return resolve_uri(self.discovery_path + '/apis')
+ end
+
+ ##
+ # Manually registers a URI as a discovery document for a specific version
+ # of an API.
+ #
+ # @param [String, Symbol] api The API name.
+ # @param [String] version The desired version of the API.
+ # @param [Addressable::URI] uri The URI of the discovery document.
+ # @return [Google::APIClient::API] The service object.
+ def register_discovery_uri(api, version, uri)
+ api = api.to_s
+ version = version || 'v1'
+ @discovery_uris["#{api}:#{version}"] = uri
+ discovered_api(api, version)
+ end
+
+ ##
+ # Returns the URI for the discovery document.
+ #
+ # @param [String, Symbol] api The API name.
+ # @param [String] version The desired version of the API.
+ # @return [Addressable::URI] The URI of the discovery document.
+ def discovery_uri(api, version=nil)
+ api = api.to_s
+ version = version || 'v1'
+ return @discovery_uris["#{api}:#{version}"] ||= (
+ resolve_uri(
+ self.discovery_path + '/apis/{api}/{version}/rest',
+ 'api' => api,
+ 'version' => version
+ )
+ )
+ end
+
+ ##
+ # Manually registers a pre-loaded discovery document for a specific version
+ # of an API.
+ #
+ # @param [String, Symbol] api The API name.
+ # @param [String] version The desired version of the API.
+ # @param [String, StringIO] discovery_document
+ # The contents of the discovery document.
+ # @return [Google::APIClient::API] The service object.
+ def register_discovery_document(api, version, discovery_document)
+ api = api.to_s
+ version = version || 'v1'
+ if discovery_document.kind_of?(StringIO)
+ discovery_document.rewind
+ discovery_document = discovery_document.string
+ elsif discovery_document.respond_to?(:to_str)
+ discovery_document = discovery_document.to_str
+ else
+ raise TypeError,
+ "Expected String or StringIO, got #{discovery_document.class}."
+ end
+ @discovery_documents["#{api}:#{version}"] =
+ MultiJson.load(discovery_document)
+ discovered_api(api, version)
+ end
+
+ ##
+ # Returns the parsed directory document.
+ #
+ # @return [Hash] The parsed JSON from the directory document.
+ def directory_document
+ return @directory_document ||= (begin
+ response = self.execute!(
+ :http_method => :get,
+ :uri => self.directory_uri,
+ :authenticated => false
+ )
+ response.data
+ end)
+ end
+
+ ##
+ # Returns the parsed discovery document.
+ #
+ # @param [String, Symbol] api The API name.
+ # @param [String] version The desired version of the API.
+ # @return [Hash] The parsed JSON from the discovery document.
+ def discovery_document(api, version=nil)
+ api = api.to_s
+ version = version || 'v1'
+ return @discovery_documents["#{api}:#{version}"] ||= (begin
+ response = self.execute!(
+ :http_method => :get,
+ :uri => self.discovery_uri(api, version),
+ :authenticated => false
+ )
+ response.data
+ end)
+ end
+
+ ##
+ # Returns all APIs published in the directory document.
+ #
+ # @return [Array] The list of available APIs.
+ def discovered_apis
+ @directory_apis ||= (begin
+ document_base = self.directory_uri
+ if self.directory_document && self.directory_document['items']
+ self.directory_document['items'].map do |discovery_document|
+ Google::APIClient::API.new(
+ document_base,
+ discovery_document
+ )
+ end
+ else
+ []
+ end
+ end)
+ end
+
+ ##
+ # Returns the service object for a given service name and service version.
+ #
+ # @param [String, Symbol] api The API name.
+ # @param [String] version The desired version of the API.
+ #
+ # @return [Google::APIClient::API] The service object.
+ def discovered_api(api, version=nil)
+ if !api.kind_of?(String) && !api.kind_of?(Symbol)
+ raise TypeError,
+ "Expected String or Symbol, got #{api.class}."
+ end
+ api = api.to_s
+ version = version || 'v1'
+ return @discovered_apis["#{api}:#{version}"] ||= begin
+ document_base = self.discovery_uri(api, version)
+ discovery_document = self.discovery_document(api, version)
+ if document_base && discovery_document
+ Google::APIClient::API.new(
+ document_base,
+ discovery_document
+ )
+ else
+ nil
+ end
+ end
+ end
+
+ ##
+ # Returns the method object for a given RPC name and service version.
+ #
+ # @param [String, Symbol] rpc_name The RPC name of the desired method.
+ # @param [String, Symbol] api The API the method is within.
+ # @param [String] version The desired version of the API.
+ #
+ # @return [Google::APIClient::Method] The method object.
+ def discovered_method(rpc_name, api, version=nil)
+ if !rpc_name.kind_of?(String) && !rpc_name.kind_of?(Symbol)
+ raise TypeError,
+ "Expected String or Symbol, got #{rpc_name.class}."
+ end
+ rpc_name = rpc_name.to_s
+ api = api.to_s
+ version = version || 'v1'
+ service = self.discovered_api(api, version)
+ if service.to_h[rpc_name]
+ return service.to_h[rpc_name]
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Returns the service object with the highest version number.
+ #
+ # @note Warning : This method should be used with great care.
+ # As APIs are updated, minor differences between versions may cause
+ # incompatibilities. Requesting a specific version will avoid this issue.
+ #
+ # @param [String, Symbol] api The name of the service.
+ #
+ # @return [Google::APIClient::API] The service object.
+ def preferred_version(api)
+ if !api.kind_of?(String) && !api.kind_of?(Symbol)
+ raise TypeError,
+ "Expected String or Symbol, got #{api.class}."
+ end
+ api = api.to_s
+ return self.discovered_apis.detect do |a|
+ a.name == api && a.preferred == true
+ end
+ end
+
+ ##
+ # Verifies an ID token against a server certificate. Used to ensure that
+ # an ID token supplied by an untrusted client-side mechanism is valid.
+ # Raises an error if the token is invalid or missing.
+ #
+ # @deprecated Use the google-id-token gem for verifying JWTs
+ def verify_id_token!
+ require 'jwt'
+ require 'openssl'
+ @certificates ||= {}
+ if !self.authorization.respond_to?(:id_token)
+ raise ArgumentError, (
+ "Current authorization mechanism does not support ID tokens: " +
+ "#{self.authorization.class.to_s}"
+ )
+ elsif !self.authorization.id_token
+ raise ArgumentError, (
+ "Could not verify ID token, ID token missing. " +
+ "Scopes were: #{self.authorization.scope.inspect}"
+ )
+ else
+ check_cached_certs = lambda do
+ valid = false
+ for _key, cert in @certificates
+ begin
+ self.authorization.decoded_id_token(cert.public_key)
+ valid = true
+ rescue JWT::DecodeError, Signet::UnsafeOperationError
+ # Expected exception. Ignore, ID token has not been validated.
+ end
+ end
+ valid
+ end
+ if check_cached_certs.call()
+ return true
+ end
+ response = self.execute!(
+ :http_method => :get,
+ :uri => 'https://www.googleapis.com/oauth2/v1/certs',
+ :authenticated => false
+ )
+ @certificates.merge!(
+ Hash[MultiJson.load(response.body).map do |key, cert|
+ [key, OpenSSL::X509::Certificate.new(cert)]
+ end]
+ )
+ if check_cached_certs.call()
+ return true
+ else
+ raise InvalidIDTokenError,
+ "Could not verify ID token against any available certificate."
+ end
+ end
+ return nil
+ end
+
+ ##
+ # Generates a request.
+ #
+ # @option options [Google::APIClient::Method] :api_method
+ # The method object or the RPC name of the method being executed.
+ # @option options [Hash, Array] :parameters
+ # The parameters to send to the method.
+ # @option options [Hash, Array] :headers The HTTP headers for the request.
+ # @option options [String] :body The body of the request.
+ # @option options [String] :version ("v1")
+ # The service version. Only used if `api_method` is a `String`.
+ # @option options [#generate_authenticated_request] :authorization
+ # The authorization mechanism for the response. Used only if
+ # `:authenticated` is `true`.
+ # @option options [TrueClass, FalseClass] :authenticated (true)
+ # `true` if the request must be signed or somehow
+ # authenticated, `false` otherwise.
+ #
+ # @return [Google::APIClient::Reference] The generated request.
+ #
+ # @example
+ # request = client.generate_request(
+ # :api_method => 'plus.activities.list',
+ # :parameters =>
+ # {'collection' => 'public', 'userId' => 'me'}
+ # )
+ def generate_request(options={})
+ options = {
+ :api_client => self
+ }.merge(options)
+ return Google::APIClient::Request.new(options)
+ end
+
+ ##
+ # Executes a request, wrapping it in a Result object.
+ #
+ # @param [Google::APIClient::Request, Hash, Array] params
+ # Either a Google::APIClient::Request, a Hash, or an Array.
+ #
+ # If a Google::APIClient::Request, no other parameters are expected.
+ #
+ # If a Hash, the below parameters are handled. If an Array, the
+ # parameters are assumed to be in the below order:
+ #
+ # - (Google::APIClient::Method) api_method:
+ # The method object or the RPC name of the method being executed.
+ # - (Hash, Array) parameters:
+ # The parameters to send to the method.
+ # - (String) body: The body of the request.
+ # - (Hash, Array) headers: The HTTP headers for the request.
+ # - (Hash) options: A set of options for the request, of which:
+ # - (#generate_authenticated_request) :authorization (default: true) -
+ # The authorization mechanism for the response. Used only if
+ # `:authenticated` is `true`.
+ # - (TrueClass, FalseClass) :authenticated (default: true) -
+ # `true` if the request must be signed or somehow
+ # authenticated, `false` otherwise.
+ # - (TrueClass, FalseClass) :gzip (default: true) -
+ # `true` if gzip enabled, `false` otherwise.
+ # - (FixNum) :retries -
+ # # of times to retry on recoverable errors
+ #
+ # @return [Google::APIClient::Result] The result from the API, nil if batch.
+ #
+ # @example
+ # result = client.execute(batch_request)
+ #
+ # @example
+ # plus = client.discovered_api('plus')
+ # result = client.execute(
+ # :api_method => plus.activities.list,
+ # :parameters => {'collection' => 'public', 'userId' => 'me'}
+ # )
+ #
+ # @see Google::APIClient#generate_request
+ def execute!(*params)
+ if params.first.kind_of?(Google::APIClient::Request)
+ request = params.shift
+ options = params.shift || {}
+ else
+ # This block of code allows us to accept multiple parameter passing
+ # styles, and maintaining some backwards compatibility.
+ #
+ # Note: I'm extremely tempted to deprecate this style of execute call.
+ if params.last.respond_to?(:to_hash) && params.size == 1
+ options = params.pop
+ else
+ options = {}
+ end
+
+ options[:api_method] = params.shift if params.size > 0
+ options[:parameters] = params.shift if params.size > 0
+ options[:body] = params.shift if params.size > 0
+ options[:headers] = params.shift if params.size > 0
+ options.update(params.shift) if params.size > 0
+ request = self.generate_request(options)
+ end
+
+ request.headers['User-Agent'] ||= '' + self.user_agent unless self.user_agent.nil?
+ request.headers['Accept-Encoding'] ||= 'gzip' unless options[:gzip] == false
+ request.headers['Content-Type'] ||= ''
+ request.parameters['key'] ||= self.key unless self.key.nil?
+ request.parameters['userIp'] ||= self.user_ip unless self.user_ip.nil?
+
+ connection = options[:connection] || self.connection
+ request.authorization = options[:authorization] || self.authorization unless options[:authenticated] == false
+
+ tries = 1 + (options[:retries] || self.retries)
+ attempt = 0
+
+ Retriable.retriable :tries => tries,
+ :on => [TransmissionError],
+ :on_retry => client_error_handler,
+ :interval => lambda {|attempts| (2 ** attempts) + rand} do
+ attempt += 1
+
+ # This 2nd level retriable only catches auth errors, and supports 1 retry, which allows
+ # auth to be re-attempted without having to retry all sorts of other failures like
+ # NotFound, etc
+ Retriable.retriable :tries => ((expired_auth_retry || tries > 1) && attempt == 1) ? 2 : 1,
+ :on => [AuthorizationError],
+ :on_retry => authorization_error_handler(request.authorization) do
+ result = request.send(connection, true)
+
+ case result.status
+ when 200...300
+ result
+ when 301, 302, 303, 307
+ request = generate_request(request.to_hash.merge({
+ :uri => result.headers['location'],
+ :api_method => nil
+ }))
+ raise RedirectError.new(result.headers['location'], result)
+ when 401
+ raise AuthorizationError.new(result.error_message || 'Invalid/Expired Authentication', result)
+ when 400, 402...500
+ raise ClientError.new(result.error_message || "A client error has occurred", result)
+ when 500...600
+ raise ServerError.new(result.error_message || "A server error has occurred", result)
+ else
+ raise TransmissionError.new(result.error_message || "A transmission error has occurred", result)
+ end
+ end
+ end
+ end
+
+ ##
+ # Same as Google::APIClient#execute!, but does not raise an exception for
+  # normal API errors.
+ #
+ # @see Google::APIClient#execute
+ def execute(*params)
+ begin
+ return self.execute!(*params)
+ rescue TransmissionError => e
+ return e.result
+ end
+ end
+
+ protected
+
+ ##
+ # Resolves a URI template against the client's configured base.
+ #
+ # @api private
+ # @param [String, Addressable::URI, Addressable::Template] template
+ # The template to resolve.
+ # @param [Hash] mapping The mapping that corresponds to the template.
+ # @return [Addressable::URI] The expanded URI.
+ def resolve_uri(template, mapping={})
+ @base_uri ||= Addressable::URI.new(
+ :scheme => 'https',
+ :host => self.host,
+ :port => self.port
+ ).normalize
+ template = if template.kind_of?(Addressable::Template)
+ template.pattern
+ elsif template.respond_to?(:to_str)
+ template.to_str
+ else
+ raise TypeError,
+ "Expected String, Addressable::URI, or Addressable::Template, " +
+ "got #{template.class}."
+ end
+ return Addressable::Template.new(@base_uri + template).expand(mapping)
+ end
+
+
+ ##
+  # Returns a proc for special processing of retries for authorization errors
+ # Only 401s should be retried and only if the credentials are refreshable
+ #
+ # @param [#fetch_access_token!] authorization
+ # OAuth 2 credentials
+ # @return [Proc]
+ def authorization_error_handler(authorization)
+ can_refresh = authorization.respond_to?(:refresh_token) && auto_refresh_token
+ Proc.new do |exception, tries|
+ next unless exception.kind_of?(AuthorizationError)
+ if can_refresh
+ begin
+ logger.debug("Attempting refresh of access token & retry of request")
+ authorization.fetch_access_token!
+ next
+ rescue Signet::AuthorizationError
+ end
+ end
+ raise exception
+ end
+ end
+
+ ##
+  # Returns a proc for special processing of retries as not all client errors
+ # are recoverable. Only 401s should be retried (via authorization_error_handler)
+ #
+ # @return [Proc]
+ def client_error_handler
+ Proc.new do |exception, tries|
+ raise exception if exception.kind_of?(ClientError)
+ end
+ end
+
+ end
+
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/compute_service_account.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/compute_service_account.rb
new file mode 100644
index 0000000000..118f1e6eb1
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/compute_service_account.rb
@@ -0,0 +1,28 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'faraday'
+require 'signet/oauth_2/client'
+
+module Google
+ class APIClient
+ class ComputeServiceAccount < Signet::OAuth2::Client
+ def fetch_access_token(options={})
+ connection = options[:connection] || Faraday.default_connection
+ response = connection.get 'http://metadata/computeMetadata/v1beta1/instance/service-accounts/default/token'
+ Signet::OAuth2.parse_credentials(response.body, response.headers['content-type'])
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/file_storage.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/file_storage.rb
new file mode 100644
index 0000000000..b3d0171660
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/file_storage.rb
@@ -0,0 +1,59 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'signet/oauth_2/client'
+require_relative 'storage'
+require_relative 'storages/file_store'
+
+module Google
+ class APIClient
+
+ ##
+ # Represents cached OAuth 2 tokens stored on local disk in a
+ # JSON serialized file. Meant to resemble the serialized format
+ # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html
+ #
+ # @deprecated
+ # Use {Google::APIClient::Storage} and {Google::APIClient::FileStore} instead
+ #
+ class FileStorage
+
+ attr_accessor :storage
+
+ def initialize(path)
+ store = Google::APIClient::FileStore.new(path)
+ @storage = Google::APIClient::Storage.new(store)
+ @storage.authorize
+ end
+
+ def load_credentials
+ storage.authorize
+ end
+
+ def authorization
+ storage.authorization
+ end
+
+ ##
+ # Write the credentials to the specified file.
+ #
+ # @param [Signet::OAuth2::Client] authorization
+ # Optional authorization instance. If not provided, the authorization
+ # already associated with this instance will be written.
+ def write_credentials(auth=nil)
+ storage.write_credentials(auth)
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/installed_app.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/installed_app.rb
new file mode 100644
index 0000000000..bdbb655d53
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/installed_app.rb
@@ -0,0 +1,126 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'webrick'
+require 'launchy'
+
+module Google
+ class APIClient
+
+ # Small helper for the sample apps for performing OAuth 2.0 flows from the command
+ # line or in any other installed app environment.
+ #
+ # @example
+ #
+ # client = Google::APIClient.new
+ # flow = Google::APIClient::InstalledAppFlow.new(
+ # :client_id => '691380668085.apps.googleusercontent.com',
+ # :client_secret => '...',
+ # :scope => 'https://www.googleapis.com/auth/drive'
+ # )
+ # client.authorization = flow.authorize
+ #
+ class InstalledAppFlow
+
+ RESPONSE_BODY = <<-HTML
+
+
+
+
+ You may close this window.
+
+ HTML
+
+ ##
+ # Configure the flow
+ #
+ # @param [Hash] options The configuration parameters for the client.
+ # @option options [Fixnum] :port
+ # Port to run the embedded server on. Defaults to 9292
+ # @option options [String] :client_id
+ # A unique identifier issued to the client to identify itself to the
+ # authorization server.
+ # @option options [String] :client_secret
+ # A shared symmetric secret issued by the authorization server,
+ # which is used to authenticate the client.
+ # @option options [String] :scope
+ # The scope of the access request, expressed either as an Array
+ # or as a space-delimited String.
+ #
+ # @see Signet::OAuth2::Client
+ def initialize(options)
+ @port = options[:port] || 9292
+ @authorization = Signet::OAuth2::Client.new({
+ :authorization_uri => 'https://accounts.google.com/o/oauth2/auth',
+ :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',
+ :redirect_uri => "http://localhost:#{@port}/"}.update(options)
+ )
+ end
+
+ ##
+ # Request authorization. Opens a browser and waits for response.
+ #
+ # @param [Google::APIClient::Storage] storage
+ # Optional object that responds to :write_credentials, used to serialize
+ # the OAuth 2 credentials after completing the flow.
+ #
+ # @return [Signet::OAuth2::Client]
+ # Authorization instance, nil if user cancelled.
+ def authorize(storage=nil)
+ auth = @authorization
+
+ server = WEBrick::HTTPServer.new(
+ :Port => @port,
+ :BindAddress =>"localhost",
+ :Logger => WEBrick::Log.new(STDOUT, 0),
+ :AccessLog => []
+ )
+ begin
+ trap("INT") { server.shutdown }
+
+ server.mount_proc '/' do |req, res|
+ auth.code = req.query['code']
+ if auth.code
+ auth.fetch_access_token!
+ end
+ res.status = WEBrick::HTTPStatus::RC_ACCEPTED
+ res.body = RESPONSE_BODY
+ server.stop
+ end
+
+ Launchy.open(auth.authorization_uri.to_s)
+ server.start
+ ensure
+ server.shutdown
+ end
+ if @authorization.access_token
+ if storage.respond_to?(:write_credentials)
+ storage.write_credentials(@authorization)
+ end
+ return @authorization
+ else
+ return nil
+ end
+ end
+ end
+
+ end
+end
+
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/jwt_asserter.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/jwt_asserter.rb
new file mode 100644
index 0000000000..35ad6ec8ea
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/jwt_asserter.rb
@@ -0,0 +1,126 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'jwt'
+require 'signet/oauth_2/client'
+require 'delegate'
+
+module Google
+ class APIClient
+ ##
+ # Generates access tokens using the JWT assertion profile. Requires a
+ # service account & access to the private key.
+ #
+ # @example Using Signet
+ #
+ # key = Google::APIClient::KeyUtils.load_from_pkcs12('client.p12', 'notasecret')
+ # client.authorization = Signet::OAuth2::Client.new(
+ # :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',
+ # :audience => 'https://accounts.google.com/o/oauth2/token',
+ # :scope => 'https://www.googleapis.com/auth/prediction',
+ # :issuer => '123456-abcdef@developer.gserviceaccount.com',
+ # :signing_key => key)
+ # client.authorization.fetch_access_token!
+ # client.execute(...)
+ #
+ # @deprecated
+ # Service accounts are now supported directly in Signet
+ # @see https://developers.google.com/accounts/docs/OAuth2ServiceAccount
+ class JWTAsserter
+ # @return [String] ID/email of the issuing party
+ attr_accessor :issuer
+ # @return [Fixnum] How long, in seconds, the assertion is valid for
+ attr_accessor :expiry
+ # @return [Fixnum] Seconds to expand the issued at/expiry window to account for clock skew
+ attr_accessor :skew
+ # @return [String] Scopes to authorize
+ attr_reader :scope
+ # @return [String,OpenSSL::PKey] key for signing assertions
+ attr_writer :key
+ # @return [String] Algorithm used for signing
+ attr_accessor :algorithm
+
+ ##
+ # Initializes the asserter for a service account.
+ #
+ # @param [String] issuer
+ # Name/ID of the client issuing the assertion
+ # @param [String, Array] scope
+ # Scopes to authorize. May be a space delimited string or array of strings
+ # @param [String,OpenSSL::PKey] key
+ # Key for signing assertions
+ # @param [String] algorithm
+ # Algorithm to use, either 'RS256' for RSA with SHA-256
+ # or 'HS256' for HMAC with SHA-256
+ def initialize(issuer, scope, key, algorithm = "RS256")
+ self.issuer = issuer
+ self.scope = scope
+ self.expiry = 60 # 1 min default
+ self.skew = 60
+ self.key = key
+ self.algorithm = algorithm
+ end
+
+ ##
+ # Set the scopes to authorize
+ #
+ # @param [String, Array] new_scope
+ # Scopes to authorize. May be a space delimited string or array of strings
+ def scope=(new_scope)
+ case new_scope
+ when Array
+ @scope = new_scope.join(' ')
+ when String
+ @scope = new_scope
+ when nil
+ @scope = ''
+ else
+ raise TypeError, "Expected Array or String, got #{new_scope.class}"
+ end
+ end
+
+ ##
+ # Request a new access token.
+ #
+ # @param [String] person
+ # Email address of a user, if requesting a token to act on their behalf
+ # @param [Hash] options
+ # Pass through to Signet::OAuth2::Client.fetch_access_token
+ # @return [Signet::OAuth2::Client] Access token
+ #
+ # @see Signet::OAuth2::Client.fetch_access_token!
+ def authorize(person = nil, options={})
+ authorization = self.to_authorization(person)
+ authorization.fetch_access_token!(options)
+ return authorization
+ end
+
+ ##
+ # Builds a Signet OAuth2 client
+ #
+ # @return [Signet::OAuth2::Client] Access token
+ def to_authorization(person = nil)
+ return Signet::OAuth2::Client.new(
+ :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',
+ :audience => 'https://accounts.google.com/o/oauth2/token',
+ :scope => self.scope,
+ :issuer => @issuer,
+ :signing_key => @key,
+ :signing_algorithm => @algorithm,
+ :person => person
+ )
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/key_utils.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/key_utils.rb
new file mode 100644
index 0000000000..6b6e0cfe5f
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/key_utils.rb
@@ -0,0 +1,93 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+ class APIClient
+ ##
+ # Helper for loading keys from the PKCS12 files downloaded when
+ # setting up service accounts at the APIs Console.
+ #
+ module KeyUtils
+ ##
+ # Loads a key from PKCS12 file, assuming a single private key
+ # is present.
+ #
+ # @param [String] keyfile
+ # Path of the PKCS12 file to load. If not a path to an actual file,
+ # assumes the string is the content of the file itself.
+ # @param [String] passphrase
+ # Passphrase for unlocking the private key
+ #
+ # @return [OpenSSL::PKey] The private key for signing assertions.
+ def self.load_from_pkcs12(keyfile, passphrase)
+ load_key(keyfile, passphrase) do |content, pass_phrase|
+ OpenSSL::PKCS12.new(content, pass_phrase).key
+ end
+ end
+
+
+ ##
+ # Loads a key from a PEM file.
+ #
+ # @param [String] keyfile
+ # Path of the PEM file to load. If not a path to an actual file,
+ # assumes the string is the content of the file itself.
+ # @param [String] passphrase
+ # Passphrase for unlocking the private key
+ #
+ # @return [OpenSSL::PKey] The private key for signing assertions.
+ #
+ def self.load_from_pem(keyfile, passphrase)
+ load_key(keyfile, passphrase) do | content, pass_phrase|
+ OpenSSL::PKey::RSA.new(content, pass_phrase)
+ end
+ end
+
+ private
+
+ ##
+ # Helper for loading keys from file or memory. Accepts a block
+ # to handle the specific file format.
+ #
+ # @param [String] keyfile
+    #   Path of the file to load. If not a path to an actual file,
+ # assumes the string is the content of the file itself.
+ # @param [String] passphrase
+ # Passphrase for unlocking the private key
+ #
+ # @yield [String, String]
+ # Key file & passphrase to extract key from
+ # @yieldparam [String] keyfile
+ # Contents of the file
+ # @yieldparam [String] passphrase
+ # Passphrase to unlock key
+ # @yieldreturn [OpenSSL::PKey]
+ # Private key
+ #
+ # @return [OpenSSL::PKey] The private key for signing assertions.
+ def self.load_key(keyfile, passphrase, &block)
+ begin
+ begin
+ content = File.open(keyfile, 'rb') { |io| io.read }
+ rescue
+ content = keyfile
+ end
+ block.call(content, passphrase)
+ rescue OpenSSL::OpenSSLError
+ raise ArgumentError.new("Invalid keyfile or passphrase")
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/pkcs12.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/pkcs12.rb
new file mode 100644
index 0000000000..94c43185db
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/pkcs12.rb
@@ -0,0 +1,41 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'google/api_client/auth/key_utils'
+module Google
+ class APIClient
+ ##
+ # Helper for loading keys from the PKCS12 files downloaded when
+ # setting up service accounts at the APIs Console.
+ #
+ module PKCS12
+ ##
+ # Loads a key from PKCS12 file, assuming a single private key
+ # is present.
+ #
+ # @param [String] keyfile
+ # Path of the PKCS12 file to load. If not a path to an actual file,
+ # assumes the string is the content of the file itself.
+ # @param [String] passphrase
+ # Passphrase for unlocking the private key
+ #
+ # @return [OpenSSL::PKey] The private key for signing assertions.
+ # @deprecated
+ # Use {Google::APIClient::KeyUtils} instead
+ def self.load_key(keyfile, passphrase)
+ KeyUtils.load_from_pkcs12(keyfile, passphrase)
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/storage.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/storage.rb
new file mode 100644
index 0000000000..c762316e75
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/storage.rb
@@ -0,0 +1,102 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'signet/oauth_2/client'
+
+module Google
+ class APIClient
+ ##
+ # Represents cached OAuth 2 tokens stored on local disk in a
+ # JSON serialized file. Meant to resemble the serialized format
+ # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html
+ #
+ class Storage
+
+ AUTHORIZATION_URI = 'https://accounts.google.com/o/oauth2/auth'
+ TOKEN_CREDENTIAL_URI = 'https://accounts.google.com/o/oauth2/token'
+
+ # @return [Object] Storage object.
+ attr_accessor :store
+
+ # @return [Signet::OAuth2::Client]
+ attr_reader :authorization
+
+ ##
+ # Initializes the Storage object.
+ #
+      # @param [Object] store Storage object
+ def initialize(store)
+ @store= store
+ @authorization = nil
+ end
+
+ ##
+ # Write the credentials to the specified store.
+ #
+      # @param [Signet::OAuth2::Client] authorization
+ # Optional authorization instance. If not provided, the authorization
+ # already associated with this instance will be written.
+ def write_credentials(authorization=nil)
+ @authorization = authorization if authorization
+ if @authorization.respond_to?(:refresh_token) && @authorization.refresh_token
+ store.write_credentials(credentials_hash)
+ end
+ end
+
+ ##
+      # Loads credentials and authorizes a client.
+ # @return [Object] Signet::OAuth2::Client or NIL
+ def authorize
+ @authorization = nil
+ cached_credentials = load_credentials
+ if cached_credentials && cached_credentials.size > 0
+ @authorization = Signet::OAuth2::Client.new(cached_credentials)
+ @authorization.issued_at = Time.at(cached_credentials['issued_at'].to_i)
+ self.refresh_authorization if @authorization.expired?
+ end
+ return @authorization
+ end
+
+ ##
+ # refresh credentials and save them to store
+ def refresh_authorization
+ authorization.refresh!
+ self.write_credentials
+ end
+
+ private
+
+ ##
+ # Attempt to read in credentials from the specified store.
+ def load_credentials
+ store.load_credentials
+ end
+
+ ##
+ # @return [Hash] with credentials
+ def credentials_hash
+ {
+ :access_token => authorization.access_token,
+ :authorization_uri => AUTHORIZATION_URI,
+ :client_id => authorization.client_id,
+ :client_secret => authorization.client_secret,
+ :expires_in => authorization.expires_in,
+ :refresh_token => authorization.refresh_token,
+ :token_credential_uri => TOKEN_CREDENTIAL_URI,
+ :issued_at => authorization.issued_at.to_i
+ }
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/file_store.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/file_store.rb
new file mode 100644
index 0000000000..cd3eae710d
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/file_store.rb
@@ -0,0 +1,58 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'json'
+
+module Google
+ class APIClient
+ ##
+ # Represents cached OAuth 2 tokens stored on local disk in a
+ # JSON serialized file. Meant to resemble the serialized format
+ # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html
+ #
+ class FileStore
+
+ attr_accessor :path
+
+ ##
+      # Initializes the FileStore object.
+ #
+ # @param [String] path
+ # Path to the credentials file.
+ def initialize(path)
+ @path= path
+ end
+
+ ##
+ # Attempt to read in credentials from the specified file.
+ def load_credentials
+ open(path, 'r') { |f| JSON.parse(f.read) }
+ rescue
+ nil
+ end
+
+ ##
+ # Write the credentials to the specified file.
+ #
+      # @param [Hash] credentials_hash
+      #   Hash of credential attributes to serialize as JSON and write
+      #   to the file at #path.
+ def write_credentials(credentials_hash)
+ open(self.path, 'w+') do |f|
+ f.write(credentials_hash.to_json)
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/redis_store.rb b/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/redis_store.rb
new file mode 100644
index 0000000000..3f76f7ca86
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/auth/storages/redis_store.rb
@@ -0,0 +1,54 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'json'
+
+module Google
+ class APIClient
+ class RedisStore
+
+ DEFAULT_REDIS_CREDENTIALS_KEY = "google_api_credentials"
+
+ attr_accessor :redis
+
+ ##
+ # Initializes the RedisStore object.
+ #
+      # @param [Object] redis Redis instance
+ def initialize(redis, key = nil)
+ @redis= redis
+ @redis_credentials_key = key
+ end
+
+ ##
+ # Attempt to read in credentials from redis.
+ def load_credentials
+ credentials = redis.get redis_credentials_key
+ JSON.parse(credentials) if credentials
+ end
+
+ def redis_credentials_key
+ @redis_credentials_key || DEFAULT_REDIS_CREDENTIALS_KEY
+ end
+
+ ##
+ # Write the credentials to redis.
+ #
+      # @param [Hash] credentials_hash
+ def write_credentials(credentials_hash)
+ redis.set(redis_credentials_key, credentials_hash.to_json)
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/batch.rb b/sdk/ruby-google-api-client/lib/google/api_client/batch.rb
new file mode 100644
index 0000000000..45a2e31044
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/batch.rb
@@ -0,0 +1,326 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'addressable/uri'
+require 'google/api_client/reference'
+require 'securerandom'
+
+module Google
+ class APIClient
+
+ ##
+ # Helper class to contain a response to an individual batched call.
+ #
+ # @api private
+ class BatchedCallResponse
+ # @return [String] UUID of the call
+ attr_reader :call_id
+ # @return [Fixnum] HTTP status code
+ attr_accessor :status
+ # @return [Hash] HTTP response headers
+ attr_accessor :headers
+ # @return [String] HTTP response body
+ attr_accessor :body
+
+ ##
+ # Initialize the call response
+ #
+ # @param [String] call_id
+ # UUID of the original call
+ # @param [Fixnum] status
+ # HTTP status
+ # @param [Hash] headers
+ # HTTP response headers
+ # @param [#read, #to_str] body
+ # Response body
+ def initialize(call_id, status = nil, headers = nil, body = nil)
+ @call_id, @status, @headers, @body = call_id, status, headers, body
+ end
+ end
+
+ # Wraps multiple API calls into a single over-the-wire HTTP request.
+ #
+ # @example
+ #
+ # client = Google::APIClient.new
+ # urlshortener = client.discovered_api('urlshortener')
+ # batch = Google::APIClient::BatchRequest.new do |result|
+ # puts result.data
+ # end
+ #
+ # batch.add(:api_method => urlshortener.url.insert, :body_object => { 'longUrl' => 'http://example.com/foo' })
+ # batch.add(:api_method => urlshortener.url.insert, :body_object => { 'longUrl' => 'http://example.com/bar' })
+ #
+ # client.execute(batch)
+ #
+ class BatchRequest < Request
+ BATCH_BOUNDARY = "-----------RubyApiBatchRequest".freeze
+
+ # @api private
+      # @return [Array<(String,Google::APIClient::Request,Proc)>] List of API calls in the batch
+ attr_reader :calls
+
+ ##
+ # Creates a new batch request.
+ #
+ # @param [Hash] options
+ # Set of options for this request.
+ # @param [Proc] block
+ # Callback for every call's response. Won't be called if a call defined
+ # a callback of its own.
+ #
+ # @return [Google::APIClient::BatchRequest]
+ # The constructed object.
+ #
+ # @yield [Google::APIClient::Result]
+ # block to be called when result ready
+ def initialize(options = {}, &block)
+ @calls = []
+ @global_callback = nil
+ @global_callback = block if block_given?
+ @last_auto_id = 0
+
+ @base_id = SecureRandom.uuid
+
+ options[:uri] ||= 'https://www.googleapis.com/batch'
+ options[:http_method] ||= 'POST'
+
+ super options
+ end
+
+ ##
+ # Add a new call to the batch request.
+ # Each call must have its own call ID; if not provided, one will
+ # automatically be generated, avoiding collisions. If duplicate call IDs
+ # are provided, an error will be thrown.
+ #
+ # @param [Hash, Google::APIClient::Request] call
+ # the call to be added.
+ # @param [String] call_id
+ # the ID to be used for this call. Must be unique
+ # @param [Proc] block
+ # callback for this call's response.
+ #
+ # @return [Google::APIClient::BatchRequest]
+ # the BatchRequest, for chaining
+ #
+ # @yield [Google::APIClient::Result]
+ # block to be called when result ready
+ def add(call, call_id = nil, &block)
+ unless call.kind_of?(Google::APIClient::Reference)
+ call = Google::APIClient::Reference.new(call)
+ end
+ call_id ||= new_id
+ if @calls.assoc(call_id)
+ raise BatchError,
+ 'A call with this ID already exists: %s' % call_id
+ end
+ callback = block_given? ? block : @global_callback
+ @calls << [call_id, call, callback]
+ return self
+ end
+
+ ##
+ # Processes the HTTP response to the batch request, issuing callbacks.
+ #
+ # @api private
+ #
+ # @param [Faraday::Response] response
+ # the HTTP response.
+ def process_http_response(response)
+ content_type = find_header('Content-Type', response.headers)
+ m = /.*boundary=(.+)/.match(content_type)
+ if m
+ boundary = m[1]
+ parts = response.body.split(/--#{Regexp.escape(boundary)}/)
+ parts = parts[1...-1]
+ parts.each do |part|
+ call_response = deserialize_call_response(part)
+ _, call, callback = @calls.assoc(call_response.call_id)
+ result = Google::APIClient::Result.new(call, call_response)
+ callback.call(result) if callback
+ end
+ end
+ Google::APIClient::Result.new(self, response)
+ end
+
+ ##
+ # Return the request body for the BatchRequest's HTTP request.
+ #
+ # @api private
+ #
+ # @return [String]
+ # the request body.
+ def to_http_request
+ if @calls.nil? || @calls.empty?
+ raise BatchError, 'Cannot make an empty batch request'
+ end
+ parts = @calls.map {|(call_id, call, _callback)| serialize_call(call_id, call)}
+ build_multipart(parts, 'multipart/mixed', BATCH_BOUNDARY)
+ super
+ end
+
+
+ protected
+
+ ##
+ # Helper method to find a header from its name, regardless of case.
+ #
+ # @api private
+ #
+ # @param [String] name
+ # the name of the header to find.
+ # @param [Hash] headers
+ # the hash of headers and their values.
+ #
+ # @return [String]
+ # the value of the desired header.
+ def find_header(name, headers)
+ _, header = headers.detect do |h, v|
+ h.downcase == name.downcase
+ end
+ return header
+ end
+
+ ##
+ # Create a new call ID. Uses an auto-incrementing, conflict-avoiding ID.
+ #
+ # @api private
+ #
+ # @return [String]
+ # the new, unique ID.
+ def new_id
+ @last_auto_id += 1
+ while @calls.assoc(@last_auto_id)
+ @last_auto_id += 1
+ end
+ return @last_auto_id.to_s
+ end
+
+ ##
+ # Convert a Content-ID header value to an id. Presumes the Content-ID
+ # header conforms to the format that id_to_header() returns.
+ #
+ # @api private
+ #
+ # @param [String] header
+ # Content-ID header value.
+ #
+ # @return [String]
+ # The extracted ID value.
+ def header_to_id(header)
+ if !header.start_with?('<') || !header.end_with?('>') ||
+ !header.include?('+')
+ raise BatchError, 'Invalid value for Content-ID: "%s"' % header
+ end
+
+ _base, call_id = header[1...-1].split('+')
+ return Addressable::URI.unencode(call_id)
+ end
+
+ ##
+ # Auxiliary method to split the headers from the body in an HTTP response.
+ #
+ # @api private
+ #
+ # @param [String] response
+ # the response to parse.
+ #
+ # @return [Array, String]
+ # the headers and the body, separately.
+ def split_headers_and_body(response)
+ headers = {}
+ payload = response.lstrip
+ while payload
+ line, payload = payload.split("\n", 2)
+ line.sub!(/\s+\z/, '')
+ break if line.empty?
+ match = /\A([^:]+):\s*/.match(line)
+ if match
+ headers[match[1]] = match.post_match
+ else
+ raise BatchError, 'Invalid header line in response: %s' % line
+ end
+ end
+ return headers, payload
+ end
+
+ ##
+ # Convert a single batched response into a BatchedCallResponse object.
+ #
+ # @api private
+ #
+ # @param [String] call_response
+ # the request to deserialize.
+ #
+ # @return [Google::APIClient::BatchedCallResponse]
+ # the parsed and converted response.
+ def deserialize_call_response(call_response)
+ outer_headers, outer_body = split_headers_and_body(call_response)
+ status_line, payload = outer_body.split("\n", 2)
+ _protocol, status, _reason = status_line.split(' ', 3)
+
+ headers, body = split_headers_and_body(payload)
+ content_id = find_header('Content-ID', outer_headers)
+ call_id = header_to_id(content_id)
+ return BatchedCallResponse.new(call_id, status.to_i, headers, body)
+ end
+
+ ##
+ # Serialize a single batched call for assembling the multipart message
+ #
+ # @api private
+ #
+ # @param [Google::APIClient::Request] call
+ # the call to serialize.
+ #
+ # @return [Faraday::UploadIO]
+ # the serialized request
+ def serialize_call(call_id, call)
+ method, uri, headers, body = call.to_http_request
+ request = "#{method.to_s.upcase} #{Addressable::URI.parse(uri).request_uri} HTTP/1.1"
+ headers.each do |header, value|
+ request << "\r\n%s: %s" % [header, value]
+ end
+ if body
+ # TODO - CompositeIO if body is a stream
+ request << "\r\n\r\n"
+ if body.respond_to?(:read)
+ request << body.read
+ else
+ request << body.to_s
+ end
+ end
+ Faraday::UploadIO.new(StringIO.new(request), 'application/http', 'ruby-api-request', 'Content-ID' => id_to_header(call_id))
+ end
+
+ ##
+ # Convert an id to a Content-ID header value.
+ #
+ # @api private
+ #
+ # @param [String] call_id
+ # identifier of individual call.
+ #
+ # @return [String]
+ # A Content-ID header with the call_id encoded into it. A UUID is
+ # prepended to the value because Content-ID headers are supposed to be
+ # universally unique.
+ def id_to_header(call_id)
+ return '<%s+%s>' % [@base_id, Addressable::URI.encode(call_id)]
+ end
+
+ end
+ end
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/charset.rb b/sdk/ruby-google-api-client/lib/google/api_client/charset.rb
new file mode 100644
index 0000000000..9668aee366
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/charset.rb
@@ -0,0 +1,33 @@
+require 'faraday'
+require 'zlib'
+
+module Google
+ class APIClient
+ class Charset < Faraday::Middleware
+ include Google::APIClient::Logging
+
+ def charset_for_content_type(type)
+ if type
+ m = type.match(/(?:charset|encoding)="?([a-z0-9-]+)"?/i)
+ if m
+ return Encoding.find(m[1])
+ end
+ end
+ nil
+ end
+
+ def adjust_encoding(env)
+ charset = charset_for_content_type(env[:response_headers]['content-type'])
+ if charset && env[:body].encoding != charset
+ env[:body].force_encoding(charset)
+ end
+ end
+
+ def on_complete(env)
+ adjust_encoding(env)
+ end
+ end
+ end
+end
+
+Faraday::Response.register_middleware :charset => Google::APIClient::Charset
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/client_secrets.rb b/sdk/ruby-google-api-client/lib/google/api_client/client_secrets.rb
new file mode 100644
index 0000000000..a9cc241389
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/client_secrets.rb
@@ -0,0 +1,179 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'compat/multi_json'
+
+
+module Google
+ class APIClient
+ ##
+ # Manages the persistence of client configuration data and secrets. Format
+ # inspired by the Google API Python client.
+ #
+ # @see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
+ #
+ # @example
+ # {
+ # "web": {
+ # "client_id": "asdfjasdljfasdkjf",
+ # "client_secret": "1912308409123890",
+ # "redirect_uris": ["https://www.example.com/oauth2callback"],
+ # "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ # "token_uri": "https://accounts.google.com/o/oauth2/token"
+ # }
+ # }
+ #
+ # @example
+ # {
+ # "installed": {
+ # "client_id": "837647042410-75ifg...usercontent.com",
+ # "client_secret":"asdlkfjaskd",
+ # "redirect_uris": ["http://localhost", "urn:ietf:oauth:2.0:oob"],
+ # "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ # "token_uri": "https://accounts.google.com/o/oauth2/token"
+ # }
+ # }
+ class ClientSecrets
+
+ ##
+ # Reads client configuration from a file
+ #
+ # @param [String] filename
+ # Path to file to load
+ #
+ # @return [Google::APIClient::ClientSecrets]
+ # OAuth client settings
+ def self.load(filename=nil)
+ if filename && File.directory?(filename)
+ search_path = File.expand_path(filename)
+ filename = nil
+ end
+ while filename == nil
+ search_path ||= File.expand_path('.')
+ if File.exists?(File.join(search_path, 'client_secrets.json'))
+ filename = File.join(search_path, 'client_secrets.json')
+ elsif search_path == '/' || search_path =~ /[a-zA-Z]:[\/\\]/
+ raise ArgumentError,
+ 'No client_secrets.json filename supplied ' +
+ 'and/or could not be found in search path.'
+ else
+ search_path = File.expand_path(File.join(search_path, '..'))
+ end
+ end
+ data = File.open(filename, 'r') { |file| MultiJson.load(file.read) }
+ return self.new(data)
+ end
+
+ ##
+    # Initialize OAuth client settings.
+ #
+ # @param [Hash] options
+ # Parsed client secrets files
+ def initialize(options={})
+ # Client auth configuration
+ @flow = options[:flow] || options.keys.first.to_s || 'web'
+ fdata = options[@flow]
+ @client_id = fdata[:client_id] || fdata["client_id"]
+ @client_secret = fdata[:client_secret] || fdata["client_secret"]
+ @redirect_uris = fdata[:redirect_uris] || fdata["redirect_uris"]
+ @redirect_uris ||= [fdata[:redirect_uri] || fdata["redirect_uri"]].compact
+ @javascript_origins = (
+ fdata[:javascript_origins] ||
+ fdata["javascript_origins"]
+ )
+ @javascript_origins ||= [fdata[:javascript_origin] || fdata["javascript_origin"]].compact
+ @authorization_uri = fdata[:auth_uri] || fdata["auth_uri"]
+ @authorization_uri ||= fdata[:authorization_uri]
+ @token_credential_uri = fdata[:token_uri] || fdata["token_uri"]
+ @token_credential_uri ||= fdata[:token_credential_uri]
+
+ # Associated token info
+ @access_token = fdata[:access_token] || fdata["access_token"]
+ @refresh_token = fdata[:refresh_token] || fdata["refresh_token"]
+ @id_token = fdata[:id_token] || fdata["id_token"]
+ @expires_in = fdata[:expires_in] || fdata["expires_in"]
+ @expires_at = fdata[:expires_at] || fdata["expires_at"]
+ @issued_at = fdata[:issued_at] || fdata["issued_at"]
+ end
+
+ attr_reader(
+ :flow, :client_id, :client_secret, :redirect_uris, :javascript_origins,
+ :authorization_uri, :token_credential_uri, :access_token,
+ :refresh_token, :id_token, :expires_in, :expires_at, :issued_at
+ )
+
+ ##
+ # Serialize back to the original JSON form
+ #
+ # @return [String]
+ # JSON
+ def to_json
+ return MultiJson.dump(to_hash)
+ end
+
+ def to_hash
+ {
+ self.flow => ({
+ 'client_id' => self.client_id,
+ 'client_secret' => self.client_secret,
+ 'redirect_uris' => self.redirect_uris,
+ 'javascript_origins' => self.javascript_origins,
+ 'auth_uri' => self.authorization_uri,
+ 'token_uri' => self.token_credential_uri,
+ 'access_token' => self.access_token,
+ 'refresh_token' => self.refresh_token,
+ 'id_token' => self.id_token,
+ 'expires_in' => self.expires_in,
+ 'expires_at' => self.expires_at,
+ 'issued_at' => self.issued_at
+ }).inject({}) do |accu, (k, v)|
+ # Prunes empty values from JSON output.
+ unless v == nil || (v.respond_to?(:empty?) && v.empty?)
+ accu[k] = v
+ end
+ accu
+ end
+ }
+ end
+
+ def to_authorization
+ gem 'signet', '>= 0.4.0'
+ require 'signet/oauth_2/client'
+ # NOTE: Do not rely on this default value, as it may change
+ new_authorization = Signet::OAuth2::Client.new
+ new_authorization.client_id = self.client_id
+ new_authorization.client_secret = self.client_secret
+ new_authorization.authorization_uri = (
+ self.authorization_uri ||
+ 'https://accounts.google.com/o/oauth2/auth'
+ )
+ new_authorization.token_credential_uri = (
+ self.token_credential_uri ||
+ 'https://accounts.google.com/o/oauth2/token'
+ )
+ new_authorization.redirect_uri = self.redirect_uris.first
+
+ # These are supported, but unlikely.
+ new_authorization.access_token = self.access_token
+ new_authorization.refresh_token = self.refresh_token
+ new_authorization.id_token = self.id_token
+ new_authorization.expires_in = self.expires_in
+ new_authorization.issued_at = self.issued_at if self.issued_at
+ new_authorization.expires_at = self.expires_at if self.expires_at
+ return new_authorization
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery.rb
new file mode 100644
index 0000000000..bb01d67ce7
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery.rb
@@ -0,0 +1,19 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'google/api_client/discovery/api'
+require 'google/api_client/discovery/resource'
+require 'google/api_client/discovery/method'
+require 'google/api_client/discovery/schema'
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery/api.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery/api.rb
new file mode 100644
index 0000000000..3bbc90da3f
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery/api.rb
@@ -0,0 +1,310 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'addressable/uri'
+require 'multi_json'
+require 'active_support/inflector'
+require 'google/api_client/discovery/resource'
+require 'google/api_client/discovery/method'
+require 'google/api_client/discovery/media'
+
+module Google
+ class APIClient
+ ##
+ # A service that has been described by a discovery document.
+ class API
+
+ ##
+ # Creates a description of a particular version of a service.
+ #
+ # @param [String] document_base
+ # Base URI for the discovery document.
+ # @param [Hash] discovery_document
+ # The section of the discovery document that applies to this service
+ # version.
+ #
+ # @return [Google::APIClient::API] The constructed service object.
+ def initialize(document_base, discovery_document)
+ @document_base = Addressable::URI.parse(document_base)
+ @discovery_document = discovery_document
+ metaclass = (class << self; self; end)
+ self.discovered_resources.each do |resource|
+ method_name = ActiveSupport::Inflector.underscore(resource.name).to_sym
+ if !self.respond_to?(method_name)
+ metaclass.send(:define_method, method_name) { resource }
+ end
+ end
+ self.discovered_methods.each do |method|
+ method_name = ActiveSupport::Inflector.underscore(method.name).to_sym
+ if !self.respond_to?(method_name)
+ metaclass.send(:define_method, method_name) { method }
+ end
+ end
+ end
+
+ # @return [String] unparsed discovery document for the API
+ attr_reader :discovery_document
+
+ ##
+ # Returns the id of the service.
+ #
+ # @return [String] The service id.
+ def id
+ return (
+ @discovery_document['id'] ||
+ "#{self.name}:#{self.version}"
+ )
+ end
+
+ ##
+ # Returns the identifier for the service.
+ #
+ # @return [String] The service identifier.
+ def name
+ return @discovery_document['name']
+ end
+
+ ##
+ # Returns the version of the service.
+ #
+ # @return [String] The service version.
+ def version
+ return @discovery_document['version']
+ end
+
+ ##
+ # Returns a human-readable title for the API.
+ #
+ # @return [Hash] The API title.
+ def title
+ return @discovery_document['title']
+ end
+
+ ##
+ # Returns a human-readable description of the API.
+ #
+ # @return [Hash] The API description.
+ def description
+ return @discovery_document['description']
+ end
+
+ ##
+ # Returns a URI for the API documentation.
+ #
+ # @return [Hash] The API documentation.
+ def documentation
+ return Addressable::URI.parse(@discovery_document['documentationLink'])
+ end
+
+ ##
+ # Returns true if this is the preferred version of this API.
+ #
+ # @return [TrueClass, FalseClass]
+ # Whether or not this is the preferred version of this API.
+ def preferred
+ return !!@discovery_document['preferred']
+ end
+
+ ##
+ # Returns the list of API features.
+ #
+ # @return [Array]
+ # The features supported by this API.
+ def features
+ return @discovery_document['features'] || []
+ end
+
+ ##
+ # Returns the root URI for this service.
+ #
+ # @return [Addressable::URI] The root URI.
+ def root_uri
+ return @root_uri ||= (
+ Addressable::URI.parse(self.discovery_document['rootUrl'])
+ )
+ end
+
+ ##
+ # Returns true if this API uses a data wrapper.
+ #
+ # @return [TrueClass, FalseClass]
+ # Whether or not this API uses a data wrapper.
+ def data_wrapper?
+ return self.features.include?('dataWrapper')
+ end
+
+ ##
+ # Returns the base URI for the discovery document.
+ #
+ # @return [Addressable::URI] The base URI.
+ attr_reader :document_base
+
+ ##
+ # Returns the base URI for this version of the service.
+ #
+ # @return [Addressable::URI] The base URI that methods are joined to.
+ def method_base
+ if @discovery_document['basePath']
+ return @method_base ||= (
+ self.root_uri.join(Addressable::URI.parse(@discovery_document['basePath']))
+ ).normalize
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Updates the hierarchy of resources and methods with the new base.
+ #
+ # @param [Addressable::URI, #to_str, String] new_method_base
+ # The new base URI to use for the service.
+ def method_base=(new_method_base)
+ @method_base = Addressable::URI.parse(new_method_base)
+ self.discovered_resources.each do |resource|
+ resource.method_base = @method_base
+ end
+ self.discovered_methods.each do |method|
+ method.method_base = @method_base
+ end
+ end
+
+ ##
+ # Returns the base URI for batch calls to this service.
+ #
+ # @return [Addressable::URI] The base URI that methods are joined to.
+ def batch_path
+ if @discovery_document['batchPath']
+ return @batch_path ||= (
+ self.document_base.join(Addressable::URI.parse('/' +
+ @discovery_document['batchPath']))
+ ).normalize
+ else
+ return nil
+ end
+ end
+
+ ##
+ # A list of schemas available for this version of the API.
+ #
+ # @return [Hash] A list of {Google::APIClient::Schema} objects.
+ def schemas
+ return @schemas ||= (
+ (@discovery_document['schemas'] || []).inject({}) do |accu, (k, v)|
+ accu[k] = Google::APIClient::Schema.parse(self, v)
+ accu
+ end
+ )
+ end
+
+ ##
+ # Returns a schema for a kind value.
+ #
+ # @return [Google::APIClient::Schema] The associated Schema object.
+ def schema_for_kind(kind)
+ api_name, schema_name = kind.split('#', 2)
+ if api_name != self.name
+ raise ArgumentError,
+ "The kind does not match this API. " +
+ "Expected '#{self.name}', got '#{api_name}'."
+ end
+ for k, v in self.schemas
+ return v if k.downcase == schema_name.downcase
+ end
+ return nil
+ end
+
+ ##
+ # A list of resources available at the root level of this version of the
+ # API.
+ #
+ # @return [Array] A list of {Google::APIClient::Resource} objects.
+ def discovered_resources
+ return @discovered_resources ||= (
+ (@discovery_document['resources'] || []).inject([]) do |accu, (k, v)|
+ accu << Google::APIClient::Resource.new(
+ self, self.method_base, k, v
+ )
+ accu
+ end
+ )
+ end
+
+ ##
+ # A list of methods available at the root level of this version of the
+ # API.
+ #
+ # @return [Array] A list of {Google::APIClient::Method} objects.
+ def discovered_methods
+ return @discovered_methods ||= (
+ (@discovery_document['methods'] || []).inject([]) do |accu, (k, v)|
+ accu << Google::APIClient::Method.new(self, self.method_base, k, v)
+ accu
+ end
+ )
+ end
+
+ ##
+ # Allows deep inspection of the discovery document.
+ def [](key)
+ return @discovery_document[key]
+ end
+
+ ##
+ # Converts the service to a flat mapping of RPC names and method objects.
+ #
+ # @return [Hash] All methods available on the service.
+ #
+ # @example
+ # # Discover available methods
+ # method_names = client.discovered_api('buzz').to_h.keys
+ def to_h
+ return @hash ||= (begin
+ methods_hash = {}
+ self.discovered_methods.each do |method|
+ methods_hash[method.id] = method
+ end
+ self.discovered_resources.each do |resource|
+ methods_hash.merge!(resource.to_h)
+ end
+ methods_hash
+ end)
+ end
+
+ ##
+      # Returns a <code>String</code> representation of the service's state.
+      #
+      # @return [String] The service's state, as a <code>String</code>.
+ def inspect
+ sprintf(
+ "#<%s:%#0x ID:%s>", self.class.to_s, self.object_id, self.id
+ )
+ end
+
+ ##
+ # Marshalling support - serialize the API to a string (doc base + original
+ # discovery document).
+ def _dump(level)
+ MultiJson.dump([@document_base.to_s, @discovery_document])
+ end
+
+ ##
+ # Marshalling support - Restore an API instance from serialized form
+ def self._load(obj)
+ new(*MultiJson.load(obj))
+ end
+
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery/media.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery/media.rb
new file mode 100644
index 0000000000..ffa7e87c3d
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery/media.rb
@@ -0,0 +1,77 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'addressable/uri'
+require 'addressable/template'
+
+require 'google/api_client/errors'
+
+
+module Google
+ class APIClient
+ ##
+ # Media upload elements for discovered methods
+ class MediaUpload
+
+ ##
+ # Creates a description of a particular method.
+ #
+ # @param [Google::APIClient::API] api
+ # Base discovery document for the API
+ # @param [Addressable::URI] method_base
+ # The base URI for the service.
+ # @param [Hash] discovery_document
+ # The media upload section of the discovery document.
+ #
+ # @return [Google::APIClient::Method] The constructed method object.
+ def initialize(api, method_base, discovery_document)
+ @api = api
+ @method_base = method_base
+ @discovery_document = discovery_document
+ end
+
+ ##
+ # List of acceptable mime types
+ #
+ # @return [Array]
+ # List of acceptable mime types for uploaded content
+ def accepted_types
+ @discovery_document['accept']
+ end
+
+ ##
+      # Maximum size of an upload
+ # TODO: Parse & convert to numeric value
+ #
+ # @return [String]
+ def max_size
+ @discovery_document['maxSize']
+ end
+
+ ##
+ # Returns the URI template for the method. A parameter list can be
+ # used to expand this into a URI.
+ #
+ # @return [Addressable::Template] The URI template.
+ def uri_template
+ return @uri_template ||= Addressable::Template.new(
+ @api.method_base.join(Addressable::URI.parse(@discovery_document['protocols']['simple']['path']))
+ )
+ end
+
+ end
+
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery/method.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery/method.rb
new file mode 100644
index 0000000000..3a06857c0e
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery/method.rb
@@ -0,0 +1,363 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'addressable/uri'
+require 'addressable/template'
+
+require 'google/api_client/errors'
+
+
+module Google
+ class APIClient
+ ##
+ # A method that has been described by a discovery document.
+ class Method
+
+ ##
+ # Creates a description of a particular method.
+ #
+ # @param [Google::APIClient::API] api
+ # The API this method belongs to.
+ # @param [Addressable::URI] method_base
+ # The base URI for the service.
+ # @param [String] method_name
+ # The identifier for the method.
+ # @param [Hash] discovery_document
+ # The section of the discovery document that applies to this method.
+ #
+ # @return [Google::APIClient::Method] The constructed method object.
+ def initialize(api, method_base, method_name, discovery_document)
+ @api = api
+ @method_base = method_base
+ @name = method_name
+ @discovery_document = discovery_document
+ end
+
+ # @return [String] unparsed discovery document for the method
+ attr_reader :discovery_document
+
+ ##
+ # Returns the API this method belongs to.
+ #
+ # @return [Google::APIClient::API] The API this method belongs to.
+ attr_reader :api
+
+ ##
+ # Returns the identifier for the method.
+ #
+ # @return [String] The method identifier.
+ attr_reader :name
+
+ ##
+ # Returns the base URI for the method.
+ #
+ # @return [Addressable::URI]
+ # The base URI that this method will be joined to.
+ attr_reader :method_base
+
+ ##
+ # Updates the method with the new base.
+ #
+ # @param [Addressable::URI, #to_str, String] new_method_base
+ # The new base URI to use for the method.
+ def method_base=(new_method_base)
+ @method_base = Addressable::URI.parse(new_method_base)
+ @uri_template = nil
+ end
+
+ ##
+ # Returns a human-readable description of the method.
+ #
+ # @return [Hash] The API description.
+ def description
+ return @discovery_document['description']
+ end
+
+ ##
+ # Returns the method ID.
+ #
+ # @return [String] The method identifier.
+ def id
+ return @discovery_document['id']
+ end
+
+ ##
+ # Returns the HTTP method or 'GET' if none is specified.
+ #
+ # @return [String] The HTTP method that will be used in the request.
+ def http_method
+ return @discovery_document['httpMethod'] || 'GET'
+ end
+
+ ##
+ # Returns the URI template for the method. A parameter list can be
+ # used to expand this into a URI.
+ #
+ # @return [Addressable::Template] The URI template.
+ def uri_template
+ return @uri_template ||= Addressable::Template.new(
+ self.method_base.join(Addressable::URI.parse("./" + @discovery_document['path']))
+ )
+ end
+
+ ##
+ # Returns media upload information for this method, if supported
+ #
+ # @return [Google::APIClient::MediaUpload] Description of upload endpoints
+ def media_upload
+ if @discovery_document['mediaUpload']
+ return @media_upload ||= Google::APIClient::MediaUpload.new(self, self.method_base, @discovery_document['mediaUpload'])
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Returns the Schema object for the method's request, if any.
+ #
+ # @return [Google::APIClient::Schema] The request schema.
+ def request_schema
+ if @discovery_document['request']
+ schema_name = @discovery_document['request']['$ref']
+ return @api.schemas[schema_name]
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Returns the Schema object for the method's response, if any.
+ #
+ # @return [Google::APIClient::Schema] The response schema.
+ def response_schema
+ if @discovery_document['response']
+ schema_name = @discovery_document['response']['$ref']
+ return @api.schemas[schema_name]
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Normalizes parameters, converting to the appropriate types.
+ #
+ # @param [Hash, Array] parameters
+ # The parameters to normalize.
+ #
+ # @return [Hash] The normalized parameters.
+ def normalize_parameters(parameters={})
+ # Convert keys to Strings when appropriate
+ if parameters.kind_of?(Hash) || parameters.kind_of?(Array)
+ # Returning an array since parameters can be repeated (ie, Adsense Management API)
+ parameters = parameters.inject([]) do |accu, (k, v)|
+ k = k.to_s if k.kind_of?(Symbol)
+ k = k.to_str if k.respond_to?(:to_str)
+ unless k.kind_of?(String)
+ raise TypeError, "Expected String, got #{k.class}."
+ end
+ accu << [k, v]
+ accu
+ end
+ else
+ raise TypeError,
+ "Expected Hash or Array, got #{parameters.class}."
+ end
+ return parameters
+ end
+
+ ##
+ # Expands the method's URI template using a parameter list.
+ #
+ # @api private
+ # @param [Hash, Array] parameters
+ # The parameter list to use.
+ #
+ # @return [Addressable::URI] The URI after expansion.
+ def generate_uri(parameters={})
+ parameters = self.normalize_parameters(parameters)
+
+ self.validate_parameters(parameters)
+ template_variables = self.uri_template.variables
+ upload_type = parameters.assoc('uploadType') || parameters.assoc('upload_type')
+ if upload_type
+ unless self.media_upload
+            raise ArgumentError, "Media upload not supported for this method"
+ end
+ case upload_type.last
+ when 'media', 'multipart', 'resumable'
+ uri = self.media_upload.uri_template.expand(parameters)
+ else
+            raise ArgumentError, "Invalid uploadType '#{upload_type}'"
+ end
+ else
+ uri = self.uri_template.expand(parameters)
+ end
+ query_parameters = parameters.reject do |k, v|
+ template_variables.include?(k)
+ end
+ # encode all non-template parameters
+ params = ""
+ unless query_parameters.empty?
+ params = "?" + Addressable::URI.form_encode(query_parameters.sort)
+ end
+ # Normalization is necessary because of undesirable percent-escaping
+ # during URI template expansion
+ return uri.normalize + params
+ end
+
+ ##
+ # Generates an HTTP request for this method.
+ #
+ # @api private
+ # @param [Hash, Array] parameters
+ # The parameters to send.
+ # @param [String, StringIO] body The body for the HTTP request.
+ # @param [Hash, Array] headers The HTTP headers for the request.
+ # @option options [Faraday::Connection] :connection
+ # The HTTP connection to use.
+ #
+ # @return [Array] The generated HTTP request.
+ def generate_request(parameters={}, body='', headers={}, options={})
+ if !headers.kind_of?(Array) && !headers.kind_of?(Hash)
+ raise TypeError, "Expected Hash or Array, got #{headers.class}."
+ end
+ method = self.http_method.to_s.downcase.to_sym
+ uri = self.generate_uri(parameters)
+ headers = Faraday::Utils::Headers.new(headers)
+ return [method, uri, headers, body]
+ end
+
+
+ ##
+      # Returns a <code>Hash</code> of the parameter descriptions for
+      # this method.
+ #
+ # @return [Hash] The parameter descriptions.
+ def parameter_descriptions
+ @parameter_descriptions ||= (
+ @discovery_document['parameters'] || {}
+ ).inject({}) { |h,(k,v)| h[k]=v; h }
+ end
+
+ ##
+      # Returns an <code>Array</code> of the parameters for this method.
+ #
+ # @return [Array] The parameters.
+ def parameters
+ @parameters ||= ((
+ @discovery_document['parameters'] || {}
+ ).inject({}) { |h,(k,v)| h[k]=v; h }).keys
+ end
+
+ ##
+      # Returns an <code>Array</code> of the required parameters for this
+ # method.
+ #
+ # @return [Array] The required parameters.
+ #
+ # @example
+ # # A list of all required parameters.
+ # method.required_parameters
+ def required_parameters
+ @required_parameters ||= ((self.parameter_descriptions.select do |k, v|
+ v['required']
+ end).inject({}) { |h,(k,v)| h[k]=v; h }).keys
+ end
+
+ ##
+      # Returns an <code>Array</code> of the optional parameters for this
+ # method.
+ #
+ # @return [Array] The optional parameters.
+ #
+ # @example
+ # # A list of all optional parameters.
+ # method.optional_parameters
+ def optional_parameters
+ @optional_parameters ||= ((self.parameter_descriptions.reject do |k, v|
+ v['required']
+ end).inject({}) { |h,(k,v)| h[k]=v; h }).keys
+ end
+
+ ##
+ # Verifies that the parameters are valid for this method. Raises an
+ # exception if validation fails.
+ #
+ # @api private
+ # @param [Hash, Array] parameters
+ # The parameters to verify.
+ #
+      # @return [NilClass] <code>nil</code> if validation passes.
+ def validate_parameters(parameters={})
+ parameters = self.normalize_parameters(parameters)
+ required_variables = ((self.parameter_descriptions.select do |k, v|
+ v['required']
+ end).inject({}) { |h,(k,v)| h[k]=v; h }).keys
+ missing_variables = required_variables - parameters.map { |(k, _)| k }
+ if missing_variables.size > 0
+ raise ArgumentError,
+ "Missing required parameters: #{missing_variables.join(', ')}."
+ end
+ parameters.each do |k, v|
+ # Handle repeated parameters.
+ if self.parameter_descriptions[k] &&
+ self.parameter_descriptions[k]['repeated'] &&
+ v.kind_of?(Array)
+ # If this is a repeated parameter and we've got an array as a
+ # value, just provide the whole array to the loop below.
+ items = v
+ else
+ # If this is not a repeated parameter, or if it is but we're
+ # being given a single value, wrap the value in an array, so that
+ # the loop below still works for the single element.
+ items = [v]
+ end
+
+ items.each do |item|
+ if self.parameter_descriptions[k]
+ enum = self.parameter_descriptions[k]['enum']
+ if enum && !enum.include?(item)
+ raise ArgumentError,
+ "Parameter '#{k}' has an invalid value: #{item}. " +
+ "Must be one of #{enum.inspect}."
+ end
+ pattern = self.parameter_descriptions[k]['pattern']
+ if pattern
+ regexp = Regexp.new("^#{pattern}$")
+ if item !~ regexp
+ raise ArgumentError,
+ "Parameter '#{k}' has an invalid value: #{item}. " +
+ "Must match: /^#{pattern}$/."
+ end
+ end
+ end
+ end
+ end
+ return nil
+ end
+
+ ##
+      # Returns a <code>String</code> representation of the method's state.
+      #
+      # @return [String] The method's state, as a <code>String</code>.
+ def inspect
+ sprintf(
+ "#<%s:%#0x ID:%s>",
+ self.class.to_s, self.object_id, self.id
+ )
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery/resource.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery/resource.rb
new file mode 100644
index 0000000000..9b757c684d
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery/resource.rb
@@ -0,0 +1,156 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'addressable/uri'
+
+require 'active_support/inflector'
+require 'google/api_client/discovery/method'
+
+
+module Google
+ class APIClient
+ ##
+ # A resource that has been described by a discovery document.
+ class Resource
+
+ ##
+ # Creates a description of a particular version of a resource.
+ #
+ # @param [Google::APIClient::API] api
+ # The API this resource belongs to.
+ # @param [Addressable::URI] method_base
+ # The base URI for the service.
+ # @param [String] resource_name
+ # The identifier for the resource.
+ # @param [Hash] discovery_document
+ # The section of the discovery document that applies to this resource.
+ #
+ # @return [Google::APIClient::Resource] The constructed resource object.
+ def initialize(api, method_base, resource_name, discovery_document)
+ @api = api
+ @method_base = method_base
+ @name = resource_name
+ @discovery_document = discovery_document
+ metaclass = (class <String representation of the resource's state.
+ #
+      # @return [String] The resource's state, as a <code>String</code>.
+ def inspect
+ sprintf(
+ "#<%s:%#0x NAME:%s>", self.class.to_s, self.object_id, self.name
+ )
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/discovery/schema.rb b/sdk/ruby-google-api-client/lib/google/api_client/discovery/schema.rb
new file mode 100644
index 0000000000..57666e698d
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/discovery/schema.rb
@@ -0,0 +1,117 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'time'
+require 'multi_json'
+require 'compat/multi_json'
+require 'base64'
+require 'autoparse'
+require 'addressable/uri'
+require 'addressable/template'
+
+require 'active_support/inflector'
+require 'google/api_client/errors'
+
+
+module Google
+ class APIClient
+ ##
+ # @api private
+ module Schema
+ def self.parse(api, schema_data)
+ # This method is super-long, but hard to break up due to the
+ # unavoidable dependence on closures and execution context.
+ schema_name = schema_data['id']
+
+ # Due to an oversight, schema IDs may not be URI references.
+ # TODO(bobaman): Remove this code once this has been resolved.
+ schema_uri = (
+ api.document_base +
+ (schema_name[0..0] != '#' ? '#' + schema_name : schema_name)
+ )
+
+ # Due to an oversight, schema IDs may not be URI references.
+ # TODO(bobaman): Remove this whole lambda once this has been resolved.
+ reformat_references = lambda do |data|
+ # This code is not particularly efficient due to recursive traversal
+ # and excess object creation, but this hopefully shouldn't be an
+ # issue since it should only be called only once per schema per
+ # process.
+ if data.kind_of?(Hash) &&
+ data['$ref'] && !data['$ref'].kind_of?(Hash)
+ if data['$ref'].respond_to?(:to_str)
+ reference = data['$ref'].to_str
+ else
+ raise TypeError, "Expected String, got #{data['$ref'].class}"
+ end
+ reference = '#' + reference if reference[0..0] != '#'
+ data.merge({
+ '$ref' => reference
+ })
+ elsif data.kind_of?(Hash)
+ data.inject({}) do |accu, (key, value)|
+ if value.kind_of?(Hash)
+ accu[key] = reformat_references.call(value)
+ else
+ accu[key] = value
+ end
+ accu
+ end
+ else
+ data
+ end
+ end
+ schema_data = reformat_references.call(schema_data)
+
+ if schema_name
+ api_name_string = ActiveSupport::Inflector.camelize(api.name)
+ api_version_string = ActiveSupport::Inflector.camelize(api.version).gsub('.', '_')
+ # This is for compatibility with Ruby 1.8.7.
+ # TODO(bobaman) Remove this when we eventually stop supporting 1.8.7.
+ args = []
+ args << false if Class.method(:const_defined?).arity != 1
+ if Google::APIClient::Schema.const_defined?(api_name_string, *args)
+ api_name = Google::APIClient::Schema.const_get(
+ api_name_string, *args
+ )
+ else
+ api_name = Google::APIClient::Schema.const_set(
+ api_name_string, Module.new
+ )
+ end
+ if api_name.const_defined?(api_version_string, *args)
+ api_version = api_name.const_get(api_version_string, *args)
+ else
+ api_version = api_name.const_set(api_version_string, Module.new)
+ end
+ if api_version.const_defined?(schema_name, *args)
+ schema_class = api_version.const_get(schema_name, *args)
+ end
+ end
+
+ # It's possible the schema has already been defined. If so, don't
+ # redefine it. This means that reloading a schema which has already
+ # been loaded into memory is not possible.
+ unless schema_class
+ schema_class = AutoParse.generate(schema_data, :uri => schema_uri)
+ if schema_name
+ api_version.const_set(schema_name, schema_class)
+ end
+ end
+ return schema_class
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/environment.rb b/sdk/ruby-google-api-client/lib/google/api_client/environment.rb
new file mode 100644
index 0000000000..50c84fe5cf
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/environment.rb
@@ -0,0 +1,42 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+module Google
+ class APIClient
+ module ENV
+ OS_VERSION = begin
+ if RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/
+ # TODO(bobaman)
+ # Confirm that all of these Windows environments actually have access
+ # to the `ver` command.
+ `ver`.sub(/\s*\[Version\s*/, '/').sub(']', '').strip
+ elsif RUBY_PLATFORM =~ /darwin/i
+ "Mac OS X/#{`sw_vers -productVersion`}"
+ elsif RUBY_PLATFORM == 'java'
+ # Get the information from java system properties to avoid spawning a
+ # sub-process, which is not friendly in some contexts (web servers).
+ require 'java'
+ name = java.lang.System.getProperty('os.name')
+ version = java.lang.System.getProperty('os.version')
+ "#{name}/#{version}"
+ else
+ `uname -sr`.sub(' ', '/')
+ end
+ rescue Exception
+ RUBY_PLATFORM
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/errors.rb b/sdk/ruby-google-api-client/lib/google/api_client/errors.rb
new file mode 100644
index 0000000000..9644c692a2
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/errors.rb
@@ -0,0 +1,65 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+module Google
+ class APIClient
+ ##
+ # An error which is raised when there is an unexpected response or other
+ # transport error that prevents an operation from succeeding.
+ class TransmissionError < StandardError
+ attr_reader :result
+ def initialize(message = nil, result = nil)
+ super(message)
+ @result = result
+ end
+ end
+
+ ##
+ # An exception that is raised if a redirect is required
+ #
+ class RedirectError < TransmissionError
+ end
+
+ ##
+ # An exception that is raised if a method is called with missing or
+ # invalid parameter values.
+ class ValidationError < StandardError
+ end
+
+ ##
+ # A 4xx class HTTP error occurred.
+ class ClientError < TransmissionError
+ end
+
+ ##
+ # A 401 HTTP error occurred.
+ class AuthorizationError < ClientError
+ end
+
+ ##
+ # A 5xx class HTTP error occurred.
+ class ServerError < TransmissionError
+ end
+
+ ##
+ # An exception that is raised if an ID token could not be validated.
+ class InvalidIDTokenError < StandardError
+ end
+
+ # Error class for problems in batch requests.
+ class BatchError < StandardError
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/logging.rb b/sdk/ruby-google-api-client/lib/google/api_client/logging.rb
new file mode 100644
index 0000000000..09a075b5c9
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/logging.rb
@@ -0,0 +1,32 @@
+require 'logger'
+
+module Google
+ class APIClient
+
+ class << self
+ ##
+ # Logger for the API client
+ #
+ # @return [Logger] logger instance.
+ attr_accessor :logger
+ end
+
+ self.logger = Logger.new(STDOUT)
+ self.logger.level = Logger::WARN
+
+ ##
+ # Module to make accessing the logger simpler
+ module Logging
+ ##
+ # Logger for the API client
+ #
+ # @return [Logger] logger instance.
+ def logger
+ Google::APIClient.logger
+ end
+ end
+
+ end
+
+
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/media.rb b/sdk/ruby-google-api-client/lib/google/api_client/media.rb
new file mode 100644
index 0000000000..96816d0bbb
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/media.rb
@@ -0,0 +1,260 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+require 'google/api_client/reference'
+require 'faraday/multipart'
+
+module Google
+ class APIClient
+ ##
+ # Uploadable media support. Holds an IO stream & content type.
+ #
+ # @see Faraday::UploadIO
+ # @example
+ # media = Google::APIClient::UploadIO.new('mymovie.m4v', 'video/mp4')
+ class UploadIO < Faraday::Multipart::FilePart
+
+ # @return [Fixnum] Size of chunks to upload. Default is nil, meaning upload the entire file in a single request
+ attr_accessor :chunk_size
+
+ ##
+ # Get the length of the stream
+ #
+ # @return [Fixnum]
+ # Length of stream, in bytes
+ def length
+ io.respond_to?(:length) ? io.length : File.size(local_path)
+ end
+ end
+
+ ##
+ # Wraps an input stream and limits data to a given range
+ #
+ # @example
+ # chunk = Google::APIClient::RangedIO.new(io, 0, 1000)
+ class RangedIO
+ ##
+ # Bind an input stream to a specific range.
+ #
+ # @param [IO] io
+ # Source input stream
+ # @param [Fixnum] offset
+ # Starting offset of the range
+ # @param [Fixnum] length
+ # Length of range
+ def initialize(io, offset, length)
+ @io = io
+ @offset = offset
+ @length = length
+ self.rewind
+ end
+
+ ##
+ # @see IO#read
+ def read(amount = nil, buf = nil)
+ buffer = buf || ''
+ if amount.nil?
+ size = @length - @pos
+ done = ''
+ elsif amount == 0
+ size = 0
+ done = ''
+ else
+ size = [@length - @pos, amount].min
+ done = nil
+ end
+
+ if size > 0
+ result = @io.read(size)
+ result.force_encoding("BINARY") if result.respond_to?(:force_encoding)
+ buffer << result if result
+ @pos = @pos + size
+ end
+
+ if buffer.length > 0
+ buffer
+ else
+ done
+ end
+ end
+
+ ##
+ # @see IO#rewind
+ def rewind
+ self.pos = 0
+ end
+
+ ##
+ # @see IO#pos
+ def pos
+ @pos
+ end
+
+ ##
+ # @see IO#pos=
+ def pos=(pos)
+ @pos = pos
+ @io.pos = @offset + pos
+ end
+ end
+
+ ##
+ # Resumable uploader.
+ #
+ class ResumableUpload < Request
+ # @return [Fixnum] Max bytes to send in a single request
+ attr_accessor :chunk_size
+
+ ##
+ # Creates a new uploader.
+ #
+ # @param [Hash] options
+ # Request options
+ def initialize(options={})
+ super options
+ self.uri = options[:uri]
+ self.http_method = :put
+ @offset = options[:offset] || 0
+ @complete = false
+ @expired = false
+ end
+
+ ##
+ # Sends all remaining chunks to the server
+ #
+ # @deprecated Pass the instance to {Google::APIClient#execute} instead
+ #
+ # @param [Google::APIClient] api_client
+ # API Client instance to use for sending
+ def send_all(api_client)
+ result = nil
+ until complete?
+ result = send_chunk(api_client)
+ break unless result.status == 308
+ end
+ return result
+ end
+
+
+ ##
+ # Sends the next chunk to the server
+ #
+ # @deprecated Pass the instance to {Google::APIClient#execute} instead
+ #
+ # @param [Google::APIClient] api_client
+ # API Client instance to use for sending
+ def send_chunk(api_client)
+ return api_client.execute(self)
+ end
+
+ ##
+ # Check if upload is complete
+ #
+ # @return [TrueClass, FalseClass]
+      #   Whether or not the upload completed successfully
+ def complete?
+ return @complete
+ end
+
+ ##
+      # Check if the upload URL expired (upload not completed in allotted time.)
+ # Expired uploads must be restarted from the beginning
+ #
+ # @return [TrueClass, FalseClass]
+ # Whether or not the upload has expired and can not be resumed
+ def expired?
+ return @expired
+ end
+
+ ##
+ # Check if upload is resumable. That is, neither complete nor expired
+ #
+ # @return [TrueClass, FalseClass] True if upload can be resumed
+ def resumable?
+ return !(self.complete? or self.expired?)
+ end
+
+ ##
+ # Convert to an HTTP request. Returns components in order of method, URI,
+ # request headers, and body
+ #
+ # @api private
+ #
+ # @return [Array<(Symbol, Addressable::URI, Hash, [#read,#to_str])>]
+ def to_http_request
+ if @complete
+ raise Google::APIClient::ClientError, "Upload already complete"
+ elsif @offset.nil?
+ self.headers.update({
+ 'Content-Length' => "0",
+ 'Content-Range' => "bytes */#{media.length}" })
+ else
+ start_offset = @offset
+ remaining = self.media.length - start_offset
+ chunk_size = self.media.chunk_size || self.chunk_size || self.media.length
+ content_length = [remaining, chunk_size].min
+ chunk = RangedIO.new(self.media.io, start_offset, content_length)
+ end_offset = start_offset + content_length - 1
+ self.headers.update({
+ 'Content-Length' => "#{content_length}",
+ 'Content-Type' => self.media.content_type,
+ 'Content-Range' => "bytes #{start_offset}-#{end_offset}/#{media.length}" })
+ self.body = chunk
+ end
+ super
+ end
+
+ ##
+ # Check the result from the server, updating the offset and/or location
+ # if available.
+ #
+ # @api private
+ #
+ # @param [Faraday::Response] response
+ # HTTP response
+ #
+ # @return [Google::APIClient::Result]
+ # Processed API response
+ def process_http_response(response)
+ case response.status
+ when 200...299
+ @complete = true
+ when 308
+ range = response.headers['range']
+ if range
+ @offset = range.scan(/\d+/).collect{|x| Integer(x)}.last + 1
+ end
+ if response.headers['location']
+ self.uri = response.headers['location']
+ end
+ when 400...499
+ @expired = true
+ when 500...599
+ # Invalidate the offset to mark it needs to be queried on the
+ # next request
+ @offset = nil
+ end
+ return Google::APIClient::Result.new(self, response)
+ end
+
+ ##
+      # Hashified version of the API request
+ #
+ # @return [Hash]
+ def to_hash
+ super.merge(:offset => @offset)
+ end
+
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/railtie.rb b/sdk/ruby-google-api-client/lib/google/api_client/railtie.rb
new file mode 100644
index 0000000000..86d9a6b204
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/railtie.rb
@@ -0,0 +1,18 @@
+require 'rails/railtie'
+require 'google/api_client/logging'
+
+module Google
+ class APIClient
+
+ ##
+ # Optional support class for Rails. Currently replaces the built-in logger
+ # with Rails' application log.
+ #
+ class Railtie < Rails::Railtie
+ initializer 'google-api-client' do |app|
+ logger = app.config.logger || Rails.logger
+ Google::APIClient.logger = logger unless logger.nil?
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/reference.rb b/sdk/ruby-google-api-client/lib/google/api_client/reference.rb
new file mode 100644
index 0000000000..15b34250d7
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/reference.rb
@@ -0,0 +1,27 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'google/api_client/request'
+
+module Google
+ class APIClient
+ ##
+ # Subclass of Request for backwards compatibility with pre-0.5.0 versions of the library
+ #
+ # @deprecated
+ # use {Google::APIClient::Request} instead
+ class Reference < Request
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/request.rb b/sdk/ruby-google-api-client/lib/google/api_client/request.rb
new file mode 100644
index 0000000000..3d6cc3415a
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/request.rb
@@ -0,0 +1,318 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'faraday'
+require 'compat/multi_json'
+require 'addressable/uri'
+require 'stringio'
+require 'google/api_client/discovery'
+require 'google/api_client/logging'
+
+module Google
+ class APIClient
+
+ ##
+ # Represents an API request.
+ class Request
+ include Google::APIClient::Logging
+
+ MULTIPART_BOUNDARY = "-----------RubyApiMultipartPost".freeze
+
+ # @return [Hash] Request parameters
+ attr_reader :parameters
+ # @return [Hash] Additional HTTP headers
+ attr_reader :headers
+ # @return [Google::APIClient::Method] API method to invoke
+ attr_reader :api_method
+ # @return [Google::APIClient::UploadIO] File to upload
+ attr_accessor :media
+ # @return [#generated_authenticated_request] User credentials
+ attr_accessor :authorization
+ # @return [TrueClass,FalseClass] True if request should include credentials
+ attr_accessor :authenticated
+ # @return [#read, #to_str] Request body
+ attr_accessor :body
+
+ ##
+ # Build a request
+ #
+ # @param [Hash] options
+ # @option options [Hash, Array] :parameters
+ # Request parameters for the API method.
+ # @option options [Google::APIClient::Method] :api_method
+ # API method to invoke. Either :api_method or :uri must be specified
+ # @option options [TrueClass, FalseClass] :authenticated
+ # True if request should include credentials. Implicitly true if
+ # unspecified and :authorization present
+ # @option options [#generate_signed_request] :authorization
+ # OAuth credentials
+ # @option options [Google::APIClient::UploadIO] :media
+ # File to upload, if media upload request
+ # @option options [#to_json, #to_hash] :body_object
+ # Main body of the API request. Typically hash or object that can
+ # be serialized to JSON
+ # @option options [#read, #to_str] :body
+ # Raw body to send in POST/PUT requests
+ # @option options [String, Addressable::URI] :uri
+ # URI to request. Either :api_method or :uri must be specified
+ # @option options [String, Symbol] :http_method
+ # HTTP method when requesting a URI
+ def initialize(options={})
+ @parameters = Faraday::Utils::ParamsHash.new
+ @headers = Faraday::Utils::Headers.new
+
+ self.parameters.merge!(options[:parameters]) unless options[:parameters].nil?
+ self.headers.merge!(options[:headers]) unless options[:headers].nil?
+ self.api_method = options[:api_method]
+ self.authenticated = options[:authenticated]
+ self.authorization = options[:authorization]
+
+ # These parameters are handled differently because they're not
+ # parameters to the API method, but rather to the API system.
+ self.parameters['key'] ||= options[:key] if options[:key]
+ self.parameters['userIp'] ||= options[:user_ip] if options[:user_ip]
+
+ if options[:media]
+ self.initialize_media_upload(options)
+ elsif options[:body]
+ self.body = options[:body]
+ elsif options[:body_object]
+ self.headers['Content-Type'] ||= 'application/json'
+ self.body = serialize_body(options[:body_object])
+ else
+ self.body = ''
+ end
+
+ unless self.api_method
+ self.http_method = options[:http_method] || 'GET'
+ self.uri = options[:uri]
+ end
+ end
+
+ # @!attribute [r] upload_type
+ # @return [String] protocol used for upload
+ def upload_type
+ return self.parameters['uploadType'] || self.parameters['upload_type']
+ end
+
+ # @!attribute http_method
+ # @return [Symbol] HTTP method if invoking a URI
+ def http_method
+ return @http_method ||= self.api_method.http_method.to_s.downcase.to_sym
+ end
+
+ def http_method=(new_http_method)
+ if new_http_method.kind_of?(Symbol)
+ @http_method = new_http_method.to_s.downcase.to_sym
+ elsif new_http_method.respond_to?(:to_str)
+ @http_method = new_http_method.to_s.downcase.to_sym
+ else
+ raise TypeError,
+ "Expected String or Symbol, got #{new_http_method.class}."
+ end
+ end
+
+ def api_method=(new_api_method)
+ if new_api_method.nil? || new_api_method.kind_of?(Google::APIClient::Method)
+ @api_method = new_api_method
+ else
+ raise TypeError,
+ "Expected Google::APIClient::Method, got #{new_api_method.class}."
+ end
+ end
+
+ # @!attribute uri
+ # @return [Addressable::URI] URI to send request
+ def uri
+ return @uri ||= self.api_method.generate_uri(self.parameters)
+ end
+
+ def uri=(new_uri)
+ @uri = Addressable::URI.parse(new_uri)
+ @parameters.update(@uri.query_values) unless @uri.query_values.nil?
+ end
+
+
+ # Transmits the request with the given connection
+ #
+ # @api private
+ #
+ # @param [Faraday::Connection] connection
+ # the connection to transmit with
+      # @param [TrueClass,FalseClass] is_retry
+      #   True if request has been previously sent
+ #
+ # @return [Google::APIClient::Result]
+ # result of API request
+ def send(connection, is_retry = false)
+ self.body.rewind if is_retry && self.body.respond_to?(:rewind)
+ env = self.to_env(connection)
+ logger.debug { "#{self.class} Sending API request #{env[:method]} #{env[:url].to_s} #{env[:request_headers]}" }
+ http_response = connection.app.call(env)
+ result = self.process_http_response(http_response)
+
+ logger.debug { "#{self.class} Result: #{result.status} #{result.headers}" }
+
+        # Resumable uploads are slightly different from other upload protocols in
+        # that they require at least 2 requests.
+ if result.status == 200 && self.upload_type == 'resumable' && self.media
+ upload = result.resumable_upload
+ unless upload.complete?
+ logger.debug { "#{self.class} Sending upload body" }
+ result = upload.send(connection)
+ end
+ end
+ return result
+ end
+
+ # Convert to an HTTP request. Returns components in order of method, URI,
+ # request headers, and body
+ #
+ # @api private
+ #
+ # @return [Array<(Symbol, Addressable::URI, Hash, [#read,#to_str])>]
+ def to_http_request
+ request = (
+ if self.api_method
+ self.api_method.generate_request(self.parameters, self.body, self.headers)
+ elsif self.uri
+ unless self.parameters.empty?
+ self.uri.query = Addressable::URI.form_encode(self.parameters)
+ end
+ [self.http_method, self.uri.to_s, self.headers, self.body]
+ end)
+ return request
+ end
+
+ ##
+      # Hashified version of the API request
+ #
+ # @return [Hash]
+ def to_hash
+ options = {}
+ if self.api_method
+ options[:api_method] = self.api_method
+ options[:parameters] = self.parameters
+ else
+ options[:http_method] = self.http_method
+ options[:uri] = self.uri
+ end
+ options[:headers] = self.headers
+ options[:body] = self.body
+ options[:media] = self.media
+ unless self.authorization.nil?
+ options[:authorization] = self.authorization
+ end
+ return options
+ end
+
+ ##
+ # Prepares the request for execution, building a hash of parts
+ # suitable for sending to Faraday::Connection.
+ #
+ # @api private
+ #
+ # @param [Faraday::Connection] connection
+ # Connection for building the request
+ #
+ # @return [Hash]
+ # Encoded request
+ def to_env(connection)
+ method, uri, headers, body = self.to_http_request
+ http_request = connection.build_request(method) do |req|
+ req.url(uri.to_s)
+ req.headers.update(headers)
+ req.body = body
+ end
+
+ if self.authorization.respond_to?(:generate_authenticated_request)
+ http_request = self.authorization.generate_authenticated_request(
+ :request => http_request,
+ :connection => connection
+ )
+ end
+
+ http_request.to_env(connection)
+ end
+
+ ##
+ # Convert HTTP response to an API Result
+ #
+ # @api private
+ #
+ # @param [Faraday::Response] response
+ # HTTP response
+ #
+ # @return [Google::APIClient::Result]
+ # Processed API response
+ def process_http_response(response)
+ Result.new(self, response)
+ end
+
+ protected
+
+ ##
+ # Adjust headers & body for media uploads
+ #
+ # @api private
+ #
+ # @param [Hash] options
+ # @option options [Hash, Array] :parameters
+ # Request parameters for the API method.
+ # @option options [Google::APIClient::UploadIO] :media
+ # File to upload, if media upload request
+ # @option options [#to_json, #to_hash] :body_object
+ # Main body of the API request. Typically hash or object that can
+ # be serialized to JSON
+ # @option options [#read, #to_str] :body
+ # Raw body to send in POST/PUT requests
+ def initialize_media_upload(options)
+ raise "media upload not supported by arvados-google-api-client"
+ end
+
+ ##
+ # Assemble a multipart message from a set of parts
+ #
+ # @api private
+ #
+ # @param [Array<[#read,#to_str]>] parts
+ # Array of parts to encode.
+ # @param [String] mime_type
+ # MIME type of the message
+ # @param [String] boundary
+ # Boundary for separating each part of the message
+ def build_multipart(parts, mime_type = 'multipart/related', boundary = MULTIPART_BOUNDARY)
+ raise "multipart upload not supported by arvados-google-api-client"
+ end
+
+ ##
+ # Serialize body object to JSON
+ #
+ # @api private
+ #
+ # @param [#to_json,#to_hash] body
+ # object to serialize
+ #
+ # @return [String]
+ # JSON
+ def serialize_body(body)
+ return body.to_json if body.respond_to?(:to_json)
+ return MultiJson.dump(body.to_hash) if body.respond_to?(:to_hash)
+ raise TypeError, 'Could not convert body object to JSON.' +
+ 'Must respond to :to_json or :to_hash.'
+ end
+
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/result.rb b/sdk/ruby-google-api-client/lib/google/api_client/result.rb
new file mode 100644
index 0000000000..c48bec04a5
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/result.rb
@@ -0,0 +1,255 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+module Google
+ class APIClient
+ ##
+ # This class wraps a result returned by an API call.
+ class Result
+ extend Forwardable
+
+ ##
+ # Init the result
+ #
+ # @param [Google::APIClient::Request] request
+ # The original request
+ # @param [Faraday::Response] response
+ # Raw HTTP Response
+ def initialize(request, response)
+ @request = request
+ @response = response
+ @media_upload = reference if reference.kind_of?(ResumableUpload)
+ end
+
+ # @return [Google::APIClient::Request] Original request object
+ attr_reader :request
+ # @return [Faraday::Response] HTTP response
+ attr_reader :response
+ # @!attribute [r] reference
+ # @return [Google::APIClient::Request] Original request object
+ # @deprecated See {#request}
+ alias_method :reference, :request # For compatibility with pre-beta clients
+
+ # @!attribute [r] status
+ # @return [Fixnum] HTTP status code
+ # @!attribute [r] headers
+ # @return [Hash] HTTP response headers
+ # @!attribute [r] body
+ # @return [String] HTTP response body
+ def_delegators :@response, :status, :headers, :body
+
+ # @!attribute [r] resumable_upload
+ # @return [Google::APIClient::ResumableUpload] For resuming media uploads
+ def resumable_upload
+ @media_upload ||= (
+ options = self.reference.to_hash.merge(
+ :uri => self.headers['location'],
+ :media => self.reference.media
+ )
+ Google::APIClient::ResumableUpload.new(options)
+ )
+ end
+
+ ##
+ # Get the content type of the response
+ # @!attribute [r] media_type
+ # @return [String]
+ # Value of content-type header
+ def media_type
+ _, content_type = self.headers.detect do |h, v|
+ h.downcase == 'Content-Type'.downcase
+ end
+ if content_type
+ return content_type[/^([^;]*);?.*$/, 1].strip.downcase
+ else
+ return nil
+ end
+ end
+
+ ##
+ # Check if request failed
+ #
+ # @!attribute [r] error?
+ # @return [TrueClass, FalseClass]
+ # true if result of operation is an error
+ def error?
+ return self.response.status >= 400
+ end
+
+ ##
+ # Check if request was successful
+ #
+ # @!attribute [r] success?
+ # @return [TrueClass, FalseClass]
+ # true if result of operation was successful
+ def success?
+ return !self.error?
+ end
+
+ ##
+ # Extracts error messages from the response body
+ #
+ # @!attribute [r] error_message
+ # @return [String]
+ # error message, if available
+ def error_message
+ if self.data?
+ if self.data.respond_to?(:error) &&
+ self.data.error.respond_to?(:message)
+ # You're going to get a terrible error message if the response isn't
+ # parsed successfully as an error.
+ return self.data.error.message
+ elsif self.data['error'] && self.data['error']['message']
+ return self.data['error']['message']
+ end
+ end
+ return self.body
+ end
+
+ ##
+ # Check for parsable data in response
+ #
+ # @!attribute [r] data?
+ # @return [TrueClass, FalseClass]
+ # true if body can be parsed
+ def data?
+ !(self.body.nil? || self.body.empty? || self.media_type != 'application/json')
+ end
+
+ ##
+ # Return parsed version of the response body.
+ #
+ # @!attribute [r] data
+ # @return [Object, Hash, String]
+ # Object if body parsable from API schema, Hash if JSON, raw body if unable to parse
+ def data
+ return @data ||= (begin
+ if self.data?
+ media_type = self.media_type
+ data = self.body
+ case media_type
+ when 'application/json'
+ data = MultiJson.load(data)
+ # Strip data wrapper, if present
+ data = data['data'] if data.has_key?('data')
+ else
+ raise ArgumentError,
+ "Content-Type not supported for parsing: #{media_type}"
+ end
+ if @request.api_method && @request.api_method.response_schema
+ # Automatically parse using the schema designated for the
+ # response of this API method.
+ data = @request.api_method.response_schema.new(data)
+ data
+ else
+ # Otherwise, return the raw unparsed value.
+ # This value must be indexable like a Hash.
+ data
+ end
+ end
+ end)
+ end
+
+ ##
+ # Get the token used for requesting the next page of data
+ #
+ # @!attribute [r] next_page_token
+ # @return [String]
+ # next page token
+ def next_page_token
+ if self.data.respond_to?(:next_page_token)
+ return self.data.next_page_token
+ elsif self.data.respond_to?(:[])
+ return self.data["nextPageToken"]
+ else
+ raise TypeError, "Data object did not respond to #next_page_token."
+ end
+ end
+
+ ##
+ # Build a request for fetching the next page of data
+ #
+ # @return [Google::APIClient::Request]
+ # API request for retrieving next page, nil if no page token available
+ def next_page
+ return nil unless self.next_page_token
+ merged_parameters = Hash[self.reference.parameters].merge({
+ self.page_token_param => self.next_page_token
+ })
+ # Because Requests can be coerced to Hashes, we can merge them,
+ # preserving all context except the API method parameters that we're
+ # using for pagination.
+ return Google::APIClient::Request.new(
+ Hash[self.reference].merge(:parameters => merged_parameters)
+ )
+ end
+
+ ##
+ # Get the token used for requesting the previous page of data
+ #
+ # @!attribute [r] prev_page_token
+ # @return [String]
+ # previous page token
+ def prev_page_token
+ if self.data.respond_to?(:prev_page_token)
+ return self.data.prev_page_token
+ elsif self.data.respond_to?(:[])
+ return self.data["prevPageToken"]
+ else
+ raise TypeError, "Data object did not respond to #next_page_token."
+ end
+ end
+
+ ##
+ # Build a request for fetching the previous page of data
+ #
+ # @return [Google::APIClient::Request]
+ # API request for retrieving previous page, nil if no page token available
+ def prev_page
+ return nil unless self.prev_page_token
+ merged_parameters = Hash[self.reference.parameters].merge({
+ self.page_token_param => self.prev_page_token
+ })
+ # Because Requests can be coerced to Hashes, we can merge them,
+ # preserving all context except the API method parameters that we're
+ # using for pagination.
+ return Google::APIClient::Request.new(
+ Hash[self.reference].merge(:parameters => merged_parameters)
+ )
+ end
+
+ ##
+ # Pagination scheme used by this request/response
+ #
+ # @!attribute [r] pagination_type
+ # @return [Symbol]
+ # currently always :token
+ def pagination_type
+ return :token
+ end
+
+ ##
+ # Name of the field that contains the pagination token
+ #
+ # @!attribute [r] page_token_param
+ # @return [String]
+ # currently always 'pageToken'
+ def page_token_param
+ return "pageToken"
+ end
+
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service.rb b/sdk/ruby-google-api-client/lib/google/api_client/service.rb
new file mode 100755
index 0000000000..28f2605d92
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service.rb
@@ -0,0 +1,233 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'google/api_client'
+require 'google/api_client/service/stub_generator'
+require 'google/api_client/service/resource'
+require 'google/api_client/service/request'
+require 'google/api_client/service/result'
+require 'google/api_client/service/batch'
+require 'google/api_client/service/simple_file_store'
+
+module Google
+ class APIClient
+
+ ##
+ # Experimental new programming interface at the API level.
+ # Hides Google::APIClient. Designed to be easier to use, with less code.
+ #
+ # @example
+ # calendar = Google::APIClient::Service.new('calendar', 'v3')
+ # result = calendar.events.list('calendarId' => 'primary').execute()
+ class Service
+ include Google::APIClient::Service::StubGenerator
+ extend Forwardable
+
+ DEFAULT_CACHE_FILE = 'discovery.cache'
+
+ # Cache for discovered APIs.
+ @@discovered = {}
+
+ ##
+ # Creates a new Service.
+ #
+ # @param [String, Symbol] api_name
+ # The name of the API this service will access.
+ # @param [String, Symbol] api_version
+ # The version of the API this service will access.
+ # @param [Hash] options
+ # The configuration parameters for the service.
+ # @option options [Symbol, #generate_authenticated_request] :authorization
+ # (:oauth_1)
+ # The authorization mechanism used by the client. The following
+ # mechanisms are supported out-of-the-box:
+ #
+ # :two_legged_oauth_1
+ # :oauth_1
+ # :oauth_2
+ #
+ # @option options [Boolean] :auto_refresh_token (true)
+ # The setting that controls whether or not the api client attempts to
+ # refresh authorization when a 401 is hit in #execute. If the token does
+ # not support it, this option is ignored.
+ # @option options [String] :application_name
+ # The name of the application using the client.
+ # @option options [String] :application_version
+ # The version number of the application using the client.
+ # @option options [String] :host ("www.googleapis.com")
+ # The API hostname used by the client. This rarely needs to be changed.
+ # @option options [String] :port (443)
+ # The port number used by the client. This rarely needs to be changed.
+ # @option options [String] :discovery_path ("/discovery/v1")
+ # The discovery base path. This rarely needs to be changed.
+ # @option options [String] :ca_file
+ # Optional set of root certificates to use when validating SSL connections.
+ # By default, a bundled set of trusted roots will be used.
+ # @option options [#generate_authenticated_request] :authorization
+ # The authorization mechanism for requests. Used only if
+ # `:authenticated` is `true`.
+ # @option options [TrueClass, FalseClass] :authenticated (default: true)
+ # `true` if requests must be signed or somehow
+ # authenticated, `false` otherwise.
+ # @option options [TrueClass, FalseClass] :gzip (default: true)
+ # `true` if gzip enabled, `false` otherwise.
+ # @option options [Faraday::Connection] :connection
+ # A custom connection to be used for all requests.
+ # @option options [ActiveSupport::Cache::Store, :default] :discovery_cache
+ # A cache store to place the discovery documents for loaded APIs.
+ # Avoids unnecessary roundtrips to the discovery service.
+ # :default loads the default local file cache store.
+ def initialize(api_name, api_version, options = {})
+ @api_name = api_name.to_s
+ if api_version.nil?
+ raise ArgumentError,
+ "API version must be set"
+ end
+ @api_version = api_version.to_s
+ if options && !options.respond_to?(:to_hash)
+ raise ArgumentError,
+ "expected options Hash, got #{options.class}"
+ end
+
+ params = {}
+ [:application_name, :application_version, :authorization, :host, :port,
+ :discovery_path, :auto_refresh_token, :key, :user_ip,
+ :ca_file].each do |option|
+ if options.include? option
+ params[option] = options[option]
+ end
+ end
+
+ @client = Google::APIClient.new(params)
+
+ @connection = options[:connection] || @client.connection
+
+ @options = options
+
+ # Initialize cache store. Default to SimpleFileStore if :cache_store
+ # is not provided and we have write permissions.
+ if options.include? :cache_store
+ @cache_store = options[:cache_store]
+ else
+ cache_exists = File.exists?(DEFAULT_CACHE_FILE)
+ if (cache_exists && File.writable?(DEFAULT_CACHE_FILE)) ||
+ (!cache_exists && File.writable?(Dir.pwd))
+ @cache_store = Google::APIClient::Service::SimpleFileStore.new(
+ DEFAULT_CACHE_FILE)
+ end
+ end
+
+ # Attempt to read API definition from memory cache.
+ # Not thread-safe, but the worst that can happen is a cache miss.
+ unless @api = @@discovered[[api_name, api_version]]
+ # Attempt to read API definition from cache store, if there is one.
+ # If there's a miss or no cache store, call discovery service.
+ if !@cache_store.nil?
+ @api = @cache_store.fetch("%s/%s" % [api_name, api_version]) do
+ @client.discovered_api(api_name, api_version)
+ end
+ else
+ @api = @client.discovered_api(api_name, api_version)
+ end
+ @@discovered[[api_name, api_version]] = @api
+ end
+
+ generate_call_stubs(self, @api)
+ end
+
+ ##
+ # Returns the authorization mechanism used by the service.
+ #
+ # @return [#generate_authenticated_request] The authorization mechanism.
+ def_delegators :@client, :authorization, :authorization=
+
+ ##
+ # The setting that controls whether or not the service attempts to
+ # refresh authorization when a 401 is hit during an API call.
+ #
+ # @return [Boolean]
+ def_delegators :@client, :auto_refresh_token, :auto_refresh_token=
+
+ ##
+ # The application's API key issued by the API console.
+ #
+ # @return [String] The API key.
+ def_delegators :@client, :key, :key=
+
+ ##
+ # The Faraday/HTTP connection used by this service.
+ #
+ # @return [Faraday::Connection]
+ attr_accessor :connection
+
+ ##
+ # The cache store used for storing discovery documents.
+ #
+ # @return [ActiveSupport::Cache::Store,
+ # Google::APIClient::Service::SimpleFileStore,
+ # nil]
+ attr_reader :cache_store
+
+ ##
+ # Prepares a Google::APIClient::BatchRequest object to make batched calls.
+ # @param [Array] calls
+ # Optional array of Google::APIClient::Service::Request to initialize
+ # the batch request with.
+ # @param [Proc] block
+ # Callback for every call's response. Won't be called if a call defined
+ # a callback of its own.
+ #
+ # @yield [Google::APIClient::Service::Result]
+ # block to be called when result ready
+ def batch(calls = nil, &block)
+ Google::APIClient::Service::BatchRequest.new(self, calls, &block)
+ end
+
+ ##
+ # Executes an API request.
+ # Do not call directly; this method is only used by Request objects when
+ # executing.
+ #
+ # @param [Google::APIClient::Service::Request,
+ # Google::APIClient::Service::BatchCall] request
+ # The request to be executed.
+ def execute(request)
+ if request.instance_of? Google::APIClient::Service::Request
+ params = {:api_method => request.method,
+ :parameters => request.parameters,
+ :connection => @connection}
+ if request.respond_to? :body
+ if request.body.respond_to? :to_hash
+ params[:body_object] = request.body
+ else
+ params[:body] = request.body
+ end
+ end
+ if request.respond_to? :media
+ params[:media] = request.media
+ end
+ [:authenticated, :gzip].each do |option|
+ if @options.include? option
+ params[option] = @options[option]
+ end
+ end
+ result = @client.execute(params)
+ return Google::APIClient::Service::Result.new(request, result)
+ elsif request.instance_of? Google::APIClient::Service::BatchRequest
+ @client.execute(request.base_batch, {:connection => @connection})
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/batch.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/batch.rb
new file mode 100644
index 0000000000..7ba406e612
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/batch.rb
@@ -0,0 +1,110 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'google/api_client/service/result'
+require 'google/api_client/batch'
+
+module Google
+ class APIClient
+ class Service
+
+ ##
+ # Helper class to contain the result of an individual batched call.
+ #
+ class BatchedCallResult < Result
+ # @return [Fixnum] Index of the call
+ def call_index
+ return @base_result.response.call_id.to_i - 1
+ end
+ end
+
+ ##
+      # Helper class that wraps Google::APIClient::BatchRequest, collecting
+ #
+ class BatchRequest
+ ##
+ # Creates a new batch request.
+ # This class shouldn't be instantiated directly, but rather through
+ # Service.batch.
+ #
+ # @param [Array] calls
+ # List of Google::APIClient::Service::Request to be made.
+ # @param [Proc] block
+ # Callback for every call's response. Won't be called if a call
+ # defined a callback of its own.
+ #
+ # @yield [Google::APIClient::Service::Result]
+ # block to be called when result ready
+ def initialize(service, calls, &block)
+ @service = service
+ @base_batch = Google::APIClient::BatchRequest.new
+ @global_callback = block if block_given?
+
+ if calls && calls.length > 0
+ calls.each do |call|
+ add(call)
+ end
+ end
+ end
+
+ ##
+ # Add a new call to the batch request.
+ #
+ # @param [Google::APIClient::Service::Request] call
+ # the call to be added.
+ # @param [Proc] block
+ # callback for this call's response.
+ #
+ # @return [Google::APIClient::Service::BatchRequest]
+ # the BatchRequest, for chaining
+ #
+ # @yield [Google::APIClient::Service::Result]
+ # block to be called when result ready
+ def add(call, &block)
+ if !block_given? && @global_callback.nil?
+ raise BatchError, 'Request needs a block'
+ end
+ callback = block || @global_callback
+ base_call = {
+ :api_method => call.method,
+ :parameters => call.parameters
+ }
+ if call.respond_to? :body
+ if call.body.respond_to? :to_hash
+ base_call[:body_object] = call.body
+ else
+ base_call[:body] = call.body
+ end
+ end
+ @base_batch.add(base_call) do |base_result|
+ result = Google::APIClient::Service::BatchedCallResult.new(
+ call, base_result)
+ callback.call(result)
+ end
+ return self
+ end
+
+ ##
+ # Executes the batch request.
+ def execute
+ @service.execute(self)
+ end
+
+ attr_reader :base_batch
+
+ end
+
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/request.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/request.rb
new file mode 100755
index 0000000000..dcbc7e3213
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/request.rb
@@ -0,0 +1,144 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+ class APIClient
+ class Service
+ ##
+ # Handles an API request.
+ # This contains a full definition of the request to be made (including
+ # method name, parameters, body and media). The remote API call can be
+ # invoked with execute().
+ class Request
+ ##
+ # Build a request.
+ # This class should not be directly instantiated in user code;
+ # instantiation is handled by the stub methods created on Service and
+ # Resource objects.
+ #
+ # @param [Google::APIClient::Service] service
+ # The parent Service instance that will execute the request.
+ # @param [Google::APIClient::Method] method
+ # The Method instance that describes the API method invoked by the
+ # request.
+ # @param [Hash] parameters
+ # A Hash of parameter names and values to be sent in the API call.
+ def initialize(service, method, parameters)
+ @service = service
+ @method = method
+ @parameters = parameters
+ @body = nil
+ @media = nil
+
+ metaclass = (class << self; self; end)
+
+ # If applicable, add "body", "body=" and resource-named methods for
+ # retrieving and setting the HTTP body for this request.
+ # Examples of setting the body for files.insert in the Drive API:
+ # request.body = object
+ # request.execute
+ # OR
+ # request.file = object
+ # request.execute
+ # OR
+ # request.body(object).execute
+ # OR
+ # request.file(object).execute
+ # Examples of retrieving the body for files.insert in the Drive API:
+ # object = request.body
+ # OR
+ # object = request.file
+ if method.request_schema
+ body_name = method.request_schema.data['id'].dup
+ body_name[0] = body_name[0].chr.downcase
+ body_name_equals = (body_name + '=').to_sym
+ body_name = body_name.to_sym
+
+ metaclass.send(:define_method, :body) do |*args|
+ if args.length == 1
+ @body = args.first
+ return self
+ elsif args.length == 0
+ return @body
+ else
+ raise ArgumentError,
+ "wrong number of arguments (#{args.length}; expecting 0 or 1)"
+ end
+ end
+
+ metaclass.send(:define_method, :body=) do |body|
+ @body = body
+ end
+
+ metaclass.send(:alias_method, body_name, :body)
+ metaclass.send(:alias_method, body_name_equals, :body=)
+ end
+
+ # If applicable, add "media" and "media=" for retrieving and setting
+ # the media object for this request.
+ # Examples of setting the media object:
+ # request.media = object
+ # request.execute
+ # OR
+ # request.media(object).execute
+ # Example of retrieving the media object:
+ # object = request.media
+ if method.media_upload
+ metaclass.send(:define_method, :media) do |*args|
+ if args.length == 1
+ @media = args.first
+ return self
+ elsif args.length == 0
+ return @media
+ else
+ raise ArgumentError,
+ "wrong number of arguments (#{args.length}; expecting 0 or 1)"
+ end
+ end
+
+ metaclass.send(:define_method, :media=) do |media|
+ @media = media
+ end
+ end
+ end
+
+ ##
+ # Returns the parent service capable of executing this request.
+ #
+ # @return [Google::APIClient::Service] The parent service.
+ attr_reader :service
+
+ ##
+ # Returns the Method instance that describes the API method invoked by
+ # the request.
+ #
+ # @return [Google::APIClient::Method] The API method description.
+ attr_reader :method
+
+ ##
+ # Contains the Hash of parameter names and values to be sent as the
+ # parameters for the API call.
+ #
+ # @return [Hash] The request parameters.
+ attr_accessor :parameters
+
+ ##
+ # Executes the request.
+ def execute
+ @service.execute(self)
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/resource.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/resource.rb
new file mode 100755
index 0000000000..b493769d4f
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/resource.rb
@@ -0,0 +1,40 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+ class APIClient
+ class Service
+ ##
+ # Handles an API resource.
+ # Simple class that contains API methods and/or child resources.
+ class Resource
+ include Google::APIClient::Service::StubGenerator
+
+ ##
+ # Build a resource.
+ # This class should not be directly instantiated in user code; resources
+ # are instantiated by the stub generation mechanism on Service creation.
+ #
+ # @param [Google::APIClient::Service] service
+ # The Service instance this resource belongs to.
+ # @param [Google::APIClient::API, Google::APIClient::Resource] root
+ # The node corresponding to this resource.
+ def initialize(service, root)
+ @service = service
+ generate_call_stubs(service, root)
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/result.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/result.rb
new file mode 100755
index 0000000000..7957ea6a26
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/result.rb
@@ -0,0 +1,162 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+ class APIClient
+ class Service
+ ##
+ # Handles an API result.
+ # Wraps around the Google::APIClient::Result class, making it easier to
+ # handle the result (e.g. pagination) and keeping it in line with the rest
+ # of the Service programming interface.
+ class Result
+ extend Forwardable
+
+ ##
+ # Init the result.
+ #
+ # @param [Google::APIClient::Service::Request] request
+ # The original request
+ # @param [Google::APIClient::Result] base_result
+ # The base result to be wrapped
+ def initialize(request, base_result)
+ @request = request
+ @base_result = base_result
+ end
+
+ # @!attribute [r] status
+ # @return [Fixnum] HTTP status code
+ # @!attribute [r] headers
+ # @return [Hash] HTTP response headers
+ # @!attribute [r] body
+ # @return [String] HTTP response body
+ def_delegators :@base_result, :status, :headers, :body
+
+ # @return [Google::APIClient::Service::Request] Original request object
+ attr_reader :request
+
+ ##
+ # Get the content type of the response
+ # @!attribute [r] media_type
+ # @return [String]
+ # Value of content-type header
+ def_delegators :@base_result, :media_type
+
+ ##
+ # Check if request failed
+ #
+ # @!attribute [r] error?
+ # @return [TrueClass, FalseClass]
+ # true if result of operation is an error
+ def_delegators :@base_result, :error?
+
+ ##
+ # Check if request was successful
+ #
+ # @!attribute [r] success?
+ # @return [TrueClass, FalseClass]
+ # true if result of operation was successful
+ def_delegators :@base_result, :success?
+
+ ##
+ # Extracts error messages from the response body
+ #
+ # @!attribute [r] error_message
+ # @return [String]
+ # error message, if available
+ def_delegators :@base_result, :error_message
+
+ ##
+ # Check for parsable data in response
+ #
+ # @!attribute [r] data?
+ # @return [TrueClass, FalseClass]
+ # true if body can be parsed
+ def_delegators :@base_result, :data?
+
+ ##
+ # Return parsed version of the response body.
+ #
+ # @!attribute [r] data
+ # @return [Object, Hash, String]
+ # Object if body parsable from API schema, Hash if JSON, raw body if unable to parse
+ def_delegators :@base_result, :data
+
+ ##
+ # Pagination scheme used by this request/response
+ #
+ # @!attribute [r] pagination_type
+ # @return [Symbol]
+ # currently always :token
+ def_delegators :@base_result, :pagination_type
+
+ ##
+ # Name of the field that contains the pagination token
+ #
+ # @!attribute [r] page_token_param
+ # @return [String]
+ # currently always 'pageToken'
+ def_delegators :@base_result, :page_token_param
+
+ ##
+ # Get the token used for requesting the next page of data
+ #
+ # @!attribute [r] next_page_token
+ # @return [String]
+        #     next page token
+ def_delegators :@base_result, :next_page_token
+
+ ##
+ # Get the token used for requesting the previous page of data
+ #
+ # @!attribute [r] prev_page_token
+ # @return [String]
+ # previous page token
+ def_delegators :@base_result, :prev_page_token
+
+ # @!attribute [r] resumable_upload
+ def resumable_upload
+ # TODO(sgomes): implement resumable_upload for Service::Result
+ raise NotImplementedError
+ end
+
+ ##
+ # Build a request for fetching the next page of data
+ #
+ # @return [Google::APIClient::Service::Request]
+ # API request for retrieving next page
+ def next_page
+ request = @request.clone
+ # Make a deep copy of the parameters.
+ request.parameters = Marshal.load(Marshal.dump(request.parameters))
+ request.parameters[page_token_param] = self.next_page_token
+ return request
+ end
+
+ ##
+ # Build a request for fetching the previous page of data
+ #
+ # @return [Google::APIClient::Service::Request]
+ # API request for retrieving previous page
+ def prev_page
+ request = @request.clone
+ # Make a deep copy of the parameters.
+ request.parameters = Marshal.load(Marshal.dump(request.parameters))
+ request.parameters[page_token_param] = self.prev_page_token
+ return request
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/simple_file_store.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/simple_file_store.rb
new file mode 100644
index 0000000000..216b3fac5f
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/simple_file_store.rb
@@ -0,0 +1,151 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Google
+ class APIClient
+ class Service
+
+ # Simple file store to be used in the event no ActiveSupport cache store
+ # is provided. This is not thread-safe, and does not support a number of
+ # features (such as expiration), but it's useful for the simple purpose of
+ # caching discovery documents to disk.
+ # Implements the basic cache methods of ActiveSupport::Cache::Store in a
+ # limited fashion.
+ class SimpleFileStore
+
+ # Creates a new SimpleFileStore.
+ #
+ # @param [String] file_path
+ # The path to the cache file on disk.
+ # @param [Object] options
+ # The options to be used with this SimpleFileStore. Not implemented.
+ def initialize(file_path, options = nil)
+ @file_path = file_path.to_s
+ end
+
+ # Returns true if a key exists in the cache.
+ #
+ # @param [String] name
+ # The name of the key. Will always be converted to a string.
+ # @param [Object] options
+ # The options to be used with this query. Not implemented.
+ def exist?(name, options = nil)
+ read_file
+ @cache.nil? ? nil : @cache.include?(name.to_s)
+ end
+
+ # Fetches data from the cache and returns it, using the given key.
+ # If the key is missing and no block is passed, returns nil.
+ # If the key is missing and a block is passed, executes the block, sets
+ # the key to its value, and returns it.
+ #
+ # @param [String] name
+ # The name of the key. Will always be converted to a string.
+ # @param [Object] options
+ # The options to be used with this query. Not implemented.
+ # @yield [String]
+ # optional block with the default value if the key is missing
+ def fetch(name, options = nil)
+ read_file
+ if block_given?
+ entry = read(name.to_s, options)
+ if entry.nil?
+ value = yield name.to_s
+ write(name.to_s, value)
+ return value
+ else
+ return entry
+ end
+ else
+ return read(name.to_s, options)
+ end
+ end
+
+ # Fetches data from the cache, using the given key.
+ # Returns nil if the key is missing.
+ #
+ # @param [String] name
+ # The name of the key. Will always be converted to a string.
+ # @param [Object] options
+ # The options to be used with this query. Not implemented.
+ def read(name, options = nil)
+ read_file
+ @cache.nil? ? nil : @cache[name.to_s]
+ end
+
+ # Writes the value to the cache, with the key.
+ #
+ # @param [String] name
+ # The name of the key. Will always be converted to a string.
+ # @param [Object] value
+ # The value to be written.
+ # @param [Object] options
+ # The options to be used with this query. Not implemented.
+ def write(name, value, options = nil)
+ read_file
+ @cache = {} if @cache.nil?
+ @cache[name.to_s] = value
+ write_file
+ return nil
+ end
+
+ # Deletes an entry in the cache.
+ # Returns true if an entry is deleted.
+ #
+ # @param [String] name
+ # The name of the key. Will always be converted to a string.
+ # @param [Object] options
+ # The options to be used with this query. Not implemented.
+ def delete(name, options = nil)
+ read_file
+ return nil if @cache.nil?
+ if @cache.include? name.to_s
+ @cache.delete name.to_s
+ write_file
+ return true
+ else
+ return nil
+ end
+ end
+
+ protected
+
+ # Read the entire cache file from disk.
+ # Will avoid reading if there have been no changes.
+ def read_file
+ if !File.exist? @file_path
+ @cache = nil
+ else
+ # Check for changes after our last read or write.
+ if @last_change.nil? || File.mtime(@file_path) > @last_change
+ File.open(@file_path) do |file|
+ @cache = Marshal.load(file)
+ @last_change = file.mtime
+ end
+ end
+ end
+ return @cache
+ end
+
+ # Write the entire cache contents to disk.
+ def write_file
+ File.open(@file_path, 'w') do |file|
+ Marshal.dump(@cache, file)
+ end
+ @last_change = File.mtime(@file_path)
+ end
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service/stub_generator.rb b/sdk/ruby-google-api-client/lib/google/api_client/service/stub_generator.rb
new file mode 100755
index 0000000000..3c84dddbd2
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service/stub_generator.rb
@@ -0,0 +1,61 @@
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'active_support/inflector'
+
+module Google
+ class APIClient
+ class Service
+ ##
+ # Auxiliary mixin to generate resource and method stubs.
+ # Used by the Service and Service::Resource classes to generate both
+ # top-level and nested resources and methods.
+ module StubGenerator
+ def generate_call_stubs(service, root)
+ metaclass = (class << self; self; end)
+
+ # Handle resources.
+ root.discovered_resources.each do |resource|
+ method_name = ActiveSupport::Inflector.underscore(resource.name).to_sym
+ if !self.respond_to?(method_name)
+ metaclass.send(:define_method, method_name) do
+ Google::APIClient::Service::Resource.new(service, resource)
+ end
+ end
+ end
+
+ # Handle methods.
+ root.discovered_methods.each do |method|
+ method_name = ActiveSupport::Inflector.underscore(method.name).to_sym
+ if !self.respond_to?(method_name)
+ metaclass.send(:define_method, method_name) do |*args|
+ if args.length > 1
+ raise ArgumentError,
+ "wrong number of arguments (#{args.length} for 1)"
+ elsif !args.first.respond_to?(:to_hash) && !args.first.nil?
+ raise ArgumentError,
+ "expected parameter Hash, got #{args.first.class}"
+ else
+ return Google::APIClient::Service::Request.new(
+ service, method, args.first
+ )
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/service_account.rb b/sdk/ruby-google-api-client/lib/google/api_client/service_account.rb
new file mode 100644
index 0000000000..3d941ae07c
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/service_account.rb
@@ -0,0 +1,21 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'google/api_client/auth/pkcs12'
+require 'google/api_client/auth/jwt_asserter'
+require 'google/api_client/auth/key_utils'
+require 'google/api_client/auth/compute_service_account'
+require 'google/api_client/auth/storage'
+require 'google/api_client/auth/storages/redis_store'
+require 'google/api_client/auth/storages/file_store'
diff --git a/sdk/ruby-google-api-client/lib/google/api_client/version.rb b/sdk/ruby-google-api-client/lib/google/api_client/version.rb
new file mode 100644
index 0000000000..3f78e4ae37
--- /dev/null
+++ b/sdk/ruby-google-api-client/lib/google/api_client/version.rb
@@ -0,0 +1,26 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+module Google
+ class APIClient
+ module VERSION
+ MAJOR = 0
+ MINOR = 8
+ TINY = 7
+ PATCH = 6
+ STRING = [MAJOR, MINOR, TINY, PATCH].compact.join('.')
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/rakelib/gem.rake b/sdk/ruby-google-api-client/rakelib/gem.rake
new file mode 100644
index 0000000000..71edc7f81a
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/gem.rake
@@ -0,0 +1,34 @@
+require "rubygems/package_task"
+
+namespace :gem do
+
+ desc "Build the gem"
+ task :build do
+ system "gem build signet.gemspec"
+ end
+
+ desc "Install the gem"
+ task :install => ["clobber", "gem:package"] do
+ sh "#{SUDO} gem install --local pkg/#{GEM_SPEC.full_name}"
+ end
+
+ desc "Uninstall the gem"
+ task :uninstall do
+ installed_list = Gem.source_index.find_name(PKG_NAME)
+ if installed_list &&
+ (installed_list.collect { |s| s.version.to_s}.include?(PKG_VERSION))
+ sh(
+ "#{SUDO} gem uninstall --version '#{PKG_VERSION}' " +
+ "--ignore-dependencies --executables #{PKG_NAME}"
+ )
+ end
+ end
+
+ desc "Reinstall the gem"
+ task :reinstall => [:uninstall, :install]
+end
+
+desc "Alias to gem:package"
+task "gem" => "gem:package"
+
+task "clobber" => ["gem:clobber_package"]
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/rakelib/git.rake b/sdk/ruby-google-api-client/rakelib/git.rake
new file mode 100644
index 0000000000..ac3f1c268f
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/git.rake
@@ -0,0 +1,45 @@
+namespace :git do
+ namespace :tag do
+ desc 'List tags from the Git repository'
+ task :list do
+ tags = `git tag -l`
+ tags.gsub!("\r", '')
+ tags = tags.split("\n").sort {|a, b| b <=> a }
+ puts tags.join("\n")
+ end
+
+ desc 'Create a new tag in the Git repository'
+ task :create do
+ changelog = File.open('CHANGELOG.md', 'r') { |file| file.read }
+ puts '-' * 80
+ puts changelog
+ puts '-' * 80
+ puts
+
+ v = ENV['VERSION'] or abort 'Must supply VERSION=x.y.z'
+ abort "Versions don't match #{v} vs #{PKG_VERSION}" if v != PKG_VERSION
+
+ git_status = `git status`
+ if git_status !~ /nothing to commit \(working directory clean\)/
+ abort "Working directory isn't clean."
+ end
+
+ tag = "#{PKG_NAME}-#{PKG_VERSION}"
+ msg = "Release #{PKG_NAME}-#{PKG_VERSION}"
+
+ existing_tags = `git tag -l #{PKG_NAME}-*`.split('\n')
+ if existing_tags.include?(tag)
+ warn('Tag already exists, deleting...')
+ unless system "git tag -d #{tag}"
+ abort 'Tag deletion failed.'
+ end
+ end
+ puts "Creating git tag '#{tag}'..."
+ unless system "git tag -a -m \"#{msg}\" #{tag}"
+ abort 'Tag creation failed.'
+ end
+ end
+ end
+end
+
+task 'gem:release' => 'git:tag:create'
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/rakelib/metrics.rake b/sdk/ruby-google-api-client/rakelib/metrics.rake
new file mode 100644
index 0000000000..67cb4eb777
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/metrics.rake
@@ -0,0 +1,22 @@
+namespace :metrics do
+ task :lines do
+ lines, codelines, total_lines, total_codelines = 0, 0, 0, 0
+ for file_name in FileList['lib/**/*.rb']
+ f = File.open(file_name)
+ while line = f.gets
+ lines += 1
+ next if line =~ /^\s*$/
+ next if line =~ /^\s*#/
+ codelines += 1
+ end
+ puts "L: #{sprintf('%4d', lines)}, " +
+ "LOC #{sprintf('%4d', codelines)} | #{file_name}"
+ total_lines += lines
+ total_codelines += codelines
+
+ lines, codelines = 0, 0
+ end
+
+ puts "Total: Lines #{total_lines}, LOC #{total_codelines}"
+ end
+end
diff --git a/sdk/ruby-google-api-client/rakelib/spec.rake b/sdk/ruby-google-api-client/rakelib/spec.rake
new file mode 100644
index 0000000000..102e9a9cc5
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/spec.rake
@@ -0,0 +1,22 @@
+require 'rake/clean'
+require 'rspec/core/rake_task'
+
+CLOBBER.include('coverage', 'specdoc')
+
+namespace :spec do
+ RSpec::Core::RakeTask.new(:all) do |t|
+ t.pattern = FileList['spec/**/*_spec.rb']
+ t.rspec_opts = ['--color', '--format', 'documentation']
+ end
+
+ desc 'Generate HTML Specdocs for all specs.'
+ RSpec::Core::RakeTask.new(:specdoc) do |t|
+ specdoc_path = File.expand_path('../../specdoc', __FILE__)
+
+ t.rspec_opts = %W( --format html --out #{File.join(specdoc_path, 'index.html')} )
+ t.fail_on_error = false
+ end
+end
+
+desc 'Alias to spec:all'
+task 'spec' => 'spec:all'
diff --git a/sdk/ruby-google-api-client/rakelib/wiki.rake b/sdk/ruby-google-api-client/rakelib/wiki.rake
new file mode 100644
index 0000000000..3e0d97d2e3
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/wiki.rake
@@ -0,0 +1,82 @@
+require 'rake'
+require 'rake/clean'
+
+CLOBBER.include('wiki')
+
+CACHE_PREFIX =
+ "http://www.gmodules.com/gadgets/proxy/container=default&debug=0&nocache=0/"
+
+namespace :wiki do
+ desc 'Autogenerate wiki pages'
+ task :supported_apis do
+ output = <<-WIKI
+#summary The list of supported APIs
+
+The Google API Client for Ruby is a small flexible client library for accessing
+the following Google APIs.
+
+WIKI
+ preferred_apis = {}
+ require 'google/api_client'
+ client = Google::APIClient.new
+ for api in client.discovered_apis
+ if !preferred_apis.has_key?(api.name)
+ preferred_apis[api.name] = api
+ elsif api.preferred
+ preferred_apis[api.name] = api
+ end
+ end
+ for api_name, api in preferred_apis
+ if api.documentation.to_s != "" && api.title != ""
+ output += (
+ "||#{CACHE_PREFIX}#{api['icons']['x16']}||" +
+ "[#{api.documentation} #{api.title}]||" +
+ "#{api.description}||\n"
+ )
+ end
+ end
+ output.gsub!(/-32\./, "-16.")
+ wiki_path = File.expand_path(
+ File.join(File.dirname(__FILE__), '../wiki/'))
+ Dir.mkdir(wiki_path) unless File.exists?(wiki_path)
+ File.open(File.join(wiki_path, 'SupportedAPIs.wiki'), 'w') do |file|
+ file.write(output)
+ end
+ end
+
+ task 'generate' => ['wiki:supported_apis']
+end
+
+begin
+ $LOAD_PATH.unshift(
+ File.expand_path(File.join(File.dirname(__FILE__), '../yard/lib'))
+ )
+ $LOAD_PATH.unshift(File.expand_path('.'))
+ $LOAD_PATH.uniq!
+
+ require 'yard'
+ require 'yard/rake/wikidoc_task'
+
+ namespace :wiki do
+ desc 'Generate Wiki Documentation with YARD'
+ YARD::Rake::WikidocTask.new do |yardoc|
+ yardoc.name = 'reference'
+ yardoc.options = [
+ '--verbose',
+ '--markup', 'markdown',
+ '-e', 'yard/lib/yard-google-code.rb',
+ '-p', 'yard/templates',
+ '-f', 'wiki',
+ '-o', 'wiki'
+ ]
+ yardoc.files = [
+ 'lib/**/*.rb', 'ext/**/*.c', '-', 'README.md', 'CHANGELOG.md'
+ ]
+ end
+
+ task 'generate' => ['wiki:reference', 'wiki:supported_apis']
+ end
+rescue LoadError
+ # If yard isn't available, it's not the end of the world
+ warn('YARD unavailable. Cannot fully generate wiki.')
+end
diff --git a/sdk/ruby-google-api-client/rakelib/yard.rake b/sdk/ruby-google-api-client/rakelib/yard.rake
new file mode 100644
index 0000000000..be0ff65922
--- /dev/null
+++ b/sdk/ruby-google-api-client/rakelib/yard.rake
@@ -0,0 +1,29 @@
+require 'rake'
+require 'rake/clean'
+
+CLOBBER.include('doc', '.yardoc')
+CLOBBER.uniq!
+
+begin
+ require 'yard'
+ require 'yard/rake/yardoc_task'
+
+ namespace :doc do
+ desc 'Generate Yardoc documentation'
+ YARD::Rake::YardocTask.new do |yardoc|
+ yardoc.name = 'yard'
+ yardoc.options = ['--verbose', '--markup', 'markdown']
+ yardoc.files = [
+ 'lib/**/*.rb', 'ext/**/*.c', '-',
+ 'README.md', 'CONTRIB.md', 'CHANGELOG.md', 'LICENSE'
+ ]
+ end
+ end
+
+ desc 'Alias to doc:yard'
+ task 'doc' => 'doc:yard'
+rescue LoadError
+ # If yard isn't available, it's not the end of the world
+ desc 'Alias to doc:rdoc'
+ task 'doc' => 'doc:rdoc'
+end
diff --git a/sdk/ruby-google-api-client/script/package b/sdk/ruby-google-api-client/script/package
new file mode 100755
index 0000000000..3f59b50373
--- /dev/null
+++ b/sdk/ruby-google-api-client/script/package
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Usage: script/gem
+# Updates the gemspec and builds a new gem in the pkg directory.
+
+mkdir -p pkg
+gem build *.gemspec
+mv *.gem pkg
+
diff --git a/sdk/ruby-google-api-client/script/release b/sdk/ruby-google-api-client/script/release
new file mode 100755
index 0000000000..1a26a4234d
--- /dev/null
+++ b/sdk/ruby-google-api-client/script/release
@@ -0,0 +1,14 @@
+# Usage: script/release
+# Build the package, tag a commit, push it to origin, and then release the
+# package publicly.
+
+set -e
+
+version="$(script/package | grep Version: | awk '{print $2}')"
+[ -n "$version" ] || exit 1
+
+git commit --allow-empty -a -m "Release $version"
+git tag "$version"
+git push --tags origin
+gem push pkg/*-${version}.gem
+
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/auth_stored_credentials.json b/sdk/ruby-google-api-client/spec/fixtures/files/auth_stored_credentials.json
new file mode 100644
index 0000000000..4cd786e4ae
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/fixtures/files/auth_stored_credentials.json
@@ -0,0 +1,8 @@
+{ "access_token":"access_token_123456789",
+ "authorization_uri":"https://accounts.google.com/o/oauth2/auth",
+ "client_id":"123456789p.apps.googleusercontent.com",
+ "client_secret":"very_secret",
+ "expires_in":3600,
+ "refresh_token":"refresh_token_12345679",
+ "token_credential_uri":"https://accounts.google.com/o/oauth2/token",
+ "issued_at":1386053761}
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/client_secrets.json b/sdk/ruby-google-api-client/spec/fixtures/files/client_secrets.json
new file mode 100644
index 0000000000..05fa7cbb5a
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/fixtures/files/client_secrets.json
@@ -0,0 +1 @@
+{"installed":{"auth_uri":"https://accounts.google.com/o/oauth2/auth","client_secret":"i8YaXdGgiQ4_KrTVNGsB7QP1","token_uri":"https://accounts.google.com/o/oauth2/token","client_email":"","client_x509_cert_url":"","client_id":"898243283568.apps.googleusercontent.com","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs"}}
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/privatekey.p12 b/sdk/ruby-google-api-client/spec/fixtures/files/privatekey.p12
new file mode 100644
index 0000000000..1e737a93a7
Binary files /dev/null and b/sdk/ruby-google-api-client/spec/fixtures/files/privatekey.p12 differ
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/sample.txt b/sdk/ruby-google-api-client/spec/fixtures/files/sample.txt
new file mode 100644
index 0000000000..fe9a30d954
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/fixtures/files/sample.txt
@@ -0,0 +1,33 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus posuere urna bibendum diam vulputate fringilla. Fusce elementum fermentum justo id aliquam. Integer vel felis ut arcu elementum lacinia. Duis congue urna eget nisl dapibus tristique molestie turpis sollicitudin. Vivamus in justo quam. Proin condimentum mollis tortor at molestie. Cras luctus, nunc a convallis iaculis, est risus consequat nisi, sit amet sollicitudin metus mi a urna. Aliquam accumsan, massa quis condimentum varius, sapien massa faucibus nibh, a dignissim magna nibh a lacus. Nunc aliquet, nunc ac pulvinar consectetur, sapien lacus hendrerit enim, nec dapibus lorem mi eget risus. Praesent vitae justo eget dolor blandit ullamcorper. Duis id nibh vitae sem aliquam vehicula et ac massa. In neque elit, molestie pulvinar viverra at, vestibulum quis velit.
+
+Mauris sit amet placerat enim. Duis vel tellus ac dui auctor tincidunt id nec augue. Donec ut blandit turpis. Mauris dictum urna id urna vestibulum accumsan. Maecenas sagittis urna vitae erat facilisis gravida. Phasellus tellus augue, commodo ut iaculis vitae, interdum ut dolor. Proin at dictum lorem. Quisque pellentesque neque ante, vitae rutrum elit. Pellentesque sit amet erat orci. Praesent justo diam, tristique eu tempus ut, vestibulum eget dui. Maecenas et elementum justo. Cras a augue a elit porttitor placerat eget ut magna.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam adipiscing tellus in arcu bibendum volutpat. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed laoreet faucibus tristique. Duis metus eros, molestie eget dignissim in, imperdiet fermentum nulla. Vestibulum laoreet lorem eu justo vestibulum lobortis. Praesent pharetra leo vel mauris rhoncus commodo sollicitudin ante auctor. Ut sagittis, tortor nec placerat rutrum, neque ipsum cursus nisl, ut lacinia magna risus ac risus. Sed volutpat commodo orci, sodales fermentum dui accumsan eu. Donec egestas ullamcorper elit at condimentum. In euismod sodales posuere. Nullam lacinia tempus molestie. Etiam vitae ullamcorper dui. Fusce congue suscipit arcu, at consectetur diam gravida id. Quisque augue urna, commodo eleifend volutpat vitae, tincidunt ac ligula. Curabitur eget orci nisl, vel placerat ipsum.
+
+Curabitur rutrum euismod nisi, consectetur varius tortor condimentum non. Pellentesque rhoncus nisi eu purus ultricies suscipit. Morbi ante nisi, varius nec molestie bibendum, pharetra quis enim. Proin eget nunc ante. Cras aliquam enim vel nunc laoreet ut facilisis nunc interdum. Fusce libero ipsum, posuere eget blandit quis, bibendum vitae quam. Integer dictum faucibus lacus eget facilisis. Duis adipiscing tortor magna, vel tincidunt risus. In non augue eu nisl sodales cursus vel eget nisi. Maecenas dignissim lectus elementum eros fermentum gravida et eget leo. Aenean quis cursus arcu. Mauris posuere purus non diam mattis vehicula. Integer nec orci velit.
+
+Integer ac justo ac magna adipiscing condimentum vitae tincidunt dui. Morbi augue arcu, blandit nec interdum sit amet, condimentum vel nisl. Nulla vehicula tincidunt laoreet. Aliquam ornare elementum urna, sed vehicula magna porta id. Vestibulum dictum ultrices tortor sit amet tincidunt. Praesent bibendum, metus vel volutpat interdum, nisl nunc cursus libero, vel congue ligula mi et felis. Nulla mollis elementum nulla, in accumsan risus consequat at. Suspendisse potenti. Vestibulum enim lorem, dignissim ut porta vestibulum, porta eget mi. Fusce a elit ac dui sodales gravida. Pellentesque sed elit at dui dapibus mattis a non arcu.
+
+Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In nec posuere augue. Praesent non suscipit arcu. Sed nibh risus, lacinia ut molestie vitae, tristique eget turpis. Sed pretium volutpat arcu, non rutrum leo volutpat sed. Maecenas quis neque nisl, sit amet ornare dolor. Nulla pharetra pulvinar tellus sed eleifend. Aliquam eget mattis nulla. Nulla dictum vehicula velit, non facilisis lorem volutpat id. Fusce scelerisque sem vitae purus dapibus lobortis. Mauris ac turpis nec nibh consequat porttitor. Ut sit amet iaculis lorem. Vivamus blandit erat ac odio venenatis fringilla a sit amet ante. Quisque ut urna sed augue laoreet sagittis.
+
+Integer nisl urna, bibendum id lobortis in, tempor non velit. Fusce sed volutpat quam. Suspendisse eu placerat purus. Maecenas quis feugiat lectus. Sed accumsan malesuada dui, a pretium purus facilisis quis. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc ac purus id lacus malesuada placerat et in nunc. Ut imperdiet tincidunt est, at consectetur augue egestas hendrerit. Pellentesque eu erat a dui dignissim adipiscing. Integer quis leo non felis placerat eleifend. Fusce luctus mi a lorem mattis eget accumsan libero posuere. Sed pellentesque, odio id pharetra tempus, enim quam placerat metus, auctor aliquam elit mi facilisis quam. Nam at velit et eros rhoncus accumsan.
+
+Donec tellus diam, fringilla ac viverra fringilla, rhoncus sit amet purus. Cras et ligula sed nibh tempor gravida. Aliquam id tempus mauris. Ut convallis quam sed arcu varius eget mattis magna tincidunt. Aliquam et suscipit est. Sed metus augue, tristique sed accumsan eget, euismod et augue. Nam augue sapien, placerat vel facilisis eu, tempor id risus. Aliquam mollis egestas mi. Fusce scelerisque convallis mauris quis blandit. Mauris nec ante id lacus sagittis tincidunt ornare vehicula dui. Curabitur tristique mattis nunc, vel cursus libero viverra feugiat. Suspendisse at sapien velit, a lacinia dolor. Vivamus in est non odio feugiat lacinia sodales ut magna.
+
+Donec interdum ligula id ipsum dapibus consectetur. Pellentesque vitae posuere ligula. Morbi rhoncus bibendum eleifend. Suspendisse fringilla nunc at elit malesuada vitae ullamcorper lorem laoreet. Suspendisse a ante at ipsum iaculis cursus. Duis accumsan ligula quis nibh luctus pretium. Duis ultrices scelerisque dolor, et vulputate lectus commodo ut.
+
+Vestibulum ac tincidunt lorem. Vestibulum lorem massa, dictum a scelerisque ut, convallis vitae eros. Morbi ipsum nisl, lacinia non tempor nec, lobortis id diam. Fusce quis magna nunc. Proin ultricies congue justo sed mattis. Vestibulum sit amet arcu tellus. Quisque ultricies porta massa iaculis vehicula. Vestibulum sollicitudin tempor urna vel sodales. Pellentesque ultricies tellus vel metus porta nec iaculis sapien mollis. Maecenas ullamcorper, metus eget imperdiet sagittis, odio orci dapibus neque, in vulputate nunc nibh non libero. Donec velit quam, lobortis quis tempus a, hendrerit id arcu.
+
+Donec nec ante at tortor dignissim mattis. Curabitur vehicula tincidunt magna id sagittis. Proin euismod dignissim porta. Curabitur non turpis purus, in rutrum nulla. Nam turpis nulla, tincidunt et hendrerit non, posuere nec enim. Curabitur leo enim, lobortis ut placerat id, condimentum nec massa. In bibendum, lectus sit amet molestie commodo, felis massa rutrum nisl, ac fermentum ligula lacus in ipsum.
+
+Pellentesque mi nulla, scelerisque vitae tempus id, consequat a augue. Quisque vel nisi sit amet ipsum faucibus laoreet sed vitae lorem. Praesent nunc tortor, volutpat ac commodo non, pharetra sed neque. Curabitur nec felis at mi blandit aliquet eu ornare justo. Mauris dignissim purus quis nisl porttitor interdum. Aenean id ipsum enim, blandit commodo justo. Quisque facilisis elit quis velit commodo scelerisque lobortis sapien condimentum. Cras sit amet porttitor velit. Praesent nec tempor arcu.
+
+Donec varius mi adipiscing elit semper vel feugiat ipsum dictum. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Donec non quam nisl, ac mattis justo. Vestibulum sed massa eget velit tristique auctor ut ac sapien. Curabitur aliquet ligula eget dui ornare at scelerisque mauris faucibus. Vestibulum id mauris metus, sed vestibulum nibh. Nulla egestas dictum blandit. Mauris vitae nibh at dui mollis lobortis. Phasellus sem leo, euismod at fringilla quis, mollis in nibh. Aenean vel lacus et elit pharetra elementum. Aliquam at ligula id sem bibendum volutpat. Pellentesque quis elit a massa dapibus viverra ut et lorem. Donec nulla eros, iaculis nec commodo vel, suscipit sit amet tortor. Integer tempor, elit at viverra imperdiet, velit sapien laoreet nunc, id laoreet ligula risus vel risus. Nullam sed tortor metus.
+
+In nunc orci, tempor vulputate pretium vel, suscipit quis risus. Suspendisse accumsan facilisis felis eget posuere. Donec a faucibus felis. Proin nibh erat, sollicitudin quis vestibulum id, tincidunt quis justo. In sed purus eu nisi dignissim condimentum. Sed mattis dapibus lorem id vulputate. Suspendisse nec elit a augue interdum consequat quis id magna. In eleifend aliquam tempor. In in lacus augue.
+
+Ut euismod sollicitudin lorem, id aliquam magna dictum sed. Nunc fringilla lobortis nisi sed consectetur. Nulla facilisi. Aenean nec lobortis augue. Curabitur ullamcorper dapibus libero, vel pellentesque arcu sollicitudin non. Praesent varius, turpis nec sollicitudin bibendum, elit tortor rhoncus lacus, gravida luctus leo nisi in felis. Ut metus eros, molestie non faucibus vel, condimentum ac elit.
+
+Suspendisse nisl justo, lacinia sit amet interdum nec, tincidunt placerat urna. Suspendisse potenti. In et odio sed purus malesuada cursus sed nec lectus. Cras commodo, orci sit amet hendrerit iaculis, nunc urna facilisis tellus, vel laoreet odio nulla quis nibh. Maecenas ut justo ut lacus posuere sodales. Vestibulum facilisis fringilla diam at volutpat. Proin a hendrerit urna. Aenean placerat pulvinar arcu, sit amet lobortis neque eleifend in. Aenean risus nulla, facilisis ut tincidunt vitae, fringilla at ligula. Praesent eleifend est at sem lacinia auctor. Nulla ornare nunc in erat laoreet blandit.
+
+Suspendisse pharetra leo ac est porta consequat. Nunc sem nibh, gravida vel aliquam a, ornare in tortor. Nulla vel sapien et felis placerat pellentesque id scelerisque nisl. Praesent et posuere.
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/secret.pem b/sdk/ruby-google-api-client/spec/fixtures/files/secret.pem
new file mode 100644
index 0000000000..28b8d12056
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/fixtures/files/secret.pem
@@ -0,0 +1,19 @@
+Bag Attributes
+ friendlyName: privatekey
+ localKeyID: 54 69 6D 65 20 31 33 35 31 38 38 38 31 37 38 36 39 36
+Key Attributes:
+-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQDYDyPb3GhyFx5i/wxS/jFsO6wSLys1ehAk6QZoBXGlg7ETVrIJ
+HYh9gXQUno4tJiQoaO8wOvleIRrqI0LkiftCXKWVSrzOiV+O9GkKx1byw1yAIZus
+QdwMT7X0O9hrZLZwhICWC9s6cGhnlCVxLIP/+JkVK7hxEq/LxoSszNV77wIDAQAB
+AoGAa2G69L7quil7VMBmI6lqbtyJfNAsrXtpIq8eG/z4qsZ076ObAKTI/XeldcoH
+57CZL+xXVKU64umZMt0rleJuGXdlauEUbsSx+biGewRfGTgC4rUSjmE539rBvmRW
+gaKliorepPMp/+B9CcG/2YfDPRvG/2cgTXJHVvneo+xHL4ECQQD2Jx5Mvs8z7s2E
+jY1mkpRKqh4Z7rlitkAwe1NXcVC8hz5ASu7ORyTl8EPpKAfRMYl1ofK/ozT1URXf
+kL5nChPfAkEA4LPUJ6cqrY4xrrtdGaM4iGIxzen5aZlKz/YNlq5LuQKbnLLHMuXU
+ohp/ynpqNWbcAFbmtGSMayxGKW5+fJgZ8QJAUBOZv82zCmn9YcnK3juBEmkVMcp/
+dKVlbGAyVJgAc9RrY+78kQ6D6mmnLgpfwKYk2ae9mKo3aDbgrsIfrtWQcQJAfFGi
+CEpJp3orbLQG319ZsMM7MOTJdC42oPZOMFbAWFzkAX88DKHx0bn9h+XQizkccSej
+Ppz+v3DgZJ3YZ1Cz0QJBALiqIokZ+oa3AY6oT0aiec6txrGvNPPbwOsrBpFqGNbu
+AByzWWBoBi40eKMSIR30LqN9H8YnJ91Aoy1njGYyQaw=
+-----END RSA PRIVATE KEY-----
diff --git a/sdk/ruby-google-api-client/spec/fixtures/files/zoo.json b/sdk/ruby-google-api-client/spec/fixtures/files/zoo.json
new file mode 100644
index 0000000000..4abd957c90
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/fixtures/files/zoo.json
@@ -0,0 +1,584 @@
+{
+ "kind": "discovery#describeItem",
+ "name": "zoo",
+ "version": "v1",
+ "description": "Zoo API used for testing",
+ "basePath": "/zoo/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "zoo/v1/",
+ "rpcPath": "/rpc",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "features": [
+ "dataWrapper"
+ ],
+ "schemas": {
+ "Animal": {
+ "id": "Animal",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string"
+ },
+ "kind": {
+ "type": "string",
+ "default": "zoo#animal"
+ },
+ "name": {
+ "type": "string"
+ },
+ "photo": {
+ "type": "object",
+ "properties": {
+ "filename": {
+ "type": "string"
+ },
+ "hash": {
+ "type": "string"
+ },
+ "hashAlgorithm": {
+ "type": "string"
+ },
+ "size": {
+ "type": "integer"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "Animal2": {
+ "id": "Animal2",
+ "type": "object",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "default": "zoo#animal"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "AnimalFeed": {
+ "id": "AnimalFeed",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "Animal"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "default": "zoo#animalFeed"
+ }
+ }
+ },
+ "AnimalMap": {
+ "id": "AnimalMap",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string"
+ },
+ "animals": {
+ "type": "object",
+ "description": "Map of animal id to animal data",
+ "additionalProperties": {
+ "$ref": "Animal"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "default": "zoo#animalMap"
+ }
+ }
+ },
+ "LoadFeed": {
+ "id": "LoadFeed",
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "doubleVal": {
+ "type": "number"
+ },
+ "nullVal": {
+ "type": "null"
+ },
+ "booleanVal": {
+ "type": "boolean",
+ "description": "True or False."
+ },
+ "anyVal": {
+ "type": "any",
+ "description": "Anything will do."
+ },
+ "enumVal": {
+ "type": "string"
+ },
+ "kind": {
+ "type": "string",
+ "default": "zoo#loadValue"
+ },
+ "longVal": {
+ "type": "integer"
+ },
+ "stringVal": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "kind": {
+ "type": "string",
+ "default": "zoo#loadFeed"
+ }
+ }
+ }
+ },
+ "methods": {
+ "query": {
+ "path": "query",
+ "id": "bigquery.query",
+ "httpMethod": "GET",
+ "parameters": {
+ "q": {
+ "type": "string",
+ "location": "query",
+ "required": false,
+ "repeated": false
+ },
+ "i": {
+ "type": "integer",
+ "location": "query",
+ "required": false,
+ "repeated": false,
+ "minimum": "0",
+ "maximum": "4294967295",
+ "default": "20"
+ },
+ "n": {
+ "type": "number",
+ "location": "query",
+ "required": false,
+ "repeated": false
+ },
+ "b": {
+ "type": "boolean",
+ "location": "query",
+ "required": false,
+ "repeated": false
+ },
+ "a": {
+ "type": "any",
+ "location": "query",
+ "required": false,
+ "repeated": false
+ },
+ "o": {
+ "type": "object",
+ "location": "query",
+ "required": false,
+ "repeated": false
+ },
+ "e": {
+ "type": "string",
+ "location": "query",
+ "required": false,
+ "repeated": false,
+ "enum": [
+ "foo",
+ "bar"
+ ]
+ },
+ "er": {
+ "type": "string",
+ "location": "query",
+ "required": false,
+ "repeated": true,
+ "enum": [
+ "one",
+ "two",
+ "three"
+ ]
+ },
+ "rr": {
+ "type": "string",
+ "location": "query",
+ "required": false,
+ "repeated": true,
+ "pattern": "[a-z]+"
+ }
+ }
+ }
+ },
+ "resources": {
+ "my": {
+ "resources": {
+ "favorites": {
+ "methods": {
+ "list": {
+ "path": "favorites/@me/mine",
+ "id": "zoo.animals.mine",
+ "httpMethod": "GET",
+ "parameters": {
+ "max-results": {
+ "location": "query",
+ "required": false
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "global": {
+ "resources": {
+ "print": {
+ "methods": {
+ "assert": {
+ "path": "global/print/assert",
+ "id": "zoo.animals.mine",
+ "httpMethod": "GET",
+ "parameters": {
+ "max-results": {
+ "location": "query",
+ "required": false
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "animals": {
+ "methods": {
+ "crossbreed": {
+ "path": "animals/crossbreed",
+ "id": "zoo.animals.crossbreed",
+ "httpMethod": "POST",
+ "description": "Cross-breed animals",
+ "response": {
+ "$ref": "Animal2"
+ },
+ "mediaUpload": {
+ "accept": [
+ "image/png"
+ ],
+ "protocols": {
+ "simple": {
+ "multipart": true,
+ "path": "upload/activities/{userId}/@self"
+ },
+ "resumable": {
+ "multipart": true,
+ "path": "upload/activities/{userId}/@self"
+ }
+ }
+ }
+ },
+ "delete": {
+ "path": "animals/{name}",
+ "id": "zoo.animals.delete",
+ "httpMethod": "DELETE",
+ "description": "Delete animals",
+ "parameters": {
+ "name": {
+ "location": "path",
+ "required": true,
+ "description": "Name of the animal to delete",
+ "type": "string"
+ }
+ },
+ "parameterOrder": [
+ "name"
+ ]
+ },
+ "get": {
+ "path": "animals/{name}",
+ "id": "zoo.animals.get",
+ "httpMethod": "GET",
+ "description": "Get animals",
+ "supportsMediaDownload": true,
+ "parameters": {
+ "name": {
+ "location": "path",
+ "required": true,
+ "description": "Name of the animal to load",
+ "type": "string"
+ },
+ "projection": {
+ "location": "query",
+ "type": "string",
+ "enum": [
+ "full"
+ ],
+ "enumDescriptions": [
+ "Include everything"
+ ]
+ }
+ },
+ "parameterOrder": [
+ "name"
+ ],
+ "response": {
+ "$ref": "Animal"
+ }
+ },
+ "getmedia": {
+ "path": "animals/{name}",
+ "id": "zoo.animals.get",
+ "httpMethod": "GET",
+ "description": "Get animals",
+ "parameters": {
+ "name": {
+ "location": "path",
+ "required": true,
+ "description": "Name of the animal to load",
+ "type": "string"
+ },
+ "projection": {
+ "location": "query",
+ "type": "string",
+ "enum": [
+ "full"
+ ],
+ "enumDescriptions": [
+ "Include everything"
+ ]
+ }
+ },
+ "parameterOrder": [
+ "name"
+ ]
+ },
+ "insert": {
+ "path": "animals",
+ "id": "zoo.animals.insert",
+ "httpMethod": "POST",
+ "description": "Insert animals",
+ "request": {
+ "$ref": "Animal"
+ },
+ "response": {
+ "$ref": "Animal"
+ },
+ "mediaUpload": {
+ "accept": [
+ "image/png"
+ ],
+ "maxSize": "1KB",
+ "protocols": {
+ "simple": {
+ "multipart": true,
+ "path": "upload/activities/{userId}/@self"
+ },
+ "resumable": {
+ "multipart": true,
+ "path": "upload/activities/{userId}/@self"
+ }
+ }
+ }
+ },
+ "list": {
+ "path": "animals",
+ "id": "zoo.animals.list",
+ "httpMethod": "GET",
+ "description": "List animals",
+ "parameters": {
+ "max-results": {
+ "location": "query",
+ "description": "Maximum number of results to return",
+ "type": "integer",
+ "minimum": "0"
+ },
+ "name": {
+ "location": "query",
+ "description": "Restrict result to animals with this name",
+ "type": "string"
+ },
+ "projection": {
+ "location": "query",
+ "type": "string",
+ "enum": [
+ "full"
+ ],
+ "enumDescriptions": [
+ "Include absolutely everything"
+ ]
+ },
+ "start-token": {
+ "location": "query",
+ "description": "Pagination token",
+ "type": "string"
+ }
+ },
+ "response": {
+ "$ref": "AnimalFeed"
+ }
+ },
+ "patch": {
+ "path": "animals/{name}",
+ "id": "zoo.animals.patch",
+ "httpMethod": "PATCH",
+ "description": "Update animals",
+ "parameters": {
+ "name": {
+ "location": "path",
+ "required": true,
+ "description": "Name of the animal to update",
+ "type": "string"
+ }
+ },
+ "parameterOrder": [
+ "name"
+ ],
+ "request": {
+ "$ref": "Animal"
+ },
+ "response": {
+ "$ref": "Animal"
+ }
+ },
+ "update": {
+ "path": "animals/{name}",
+ "id": "zoo.animals.update",
+ "httpMethod": "PUT",
+ "description": "Update animals",
+ "parameters": {
+ "name": {
+ "location": "path",
+ "description": "Name of the animal to update",
+ "type": "string"
+ }
+ },
+ "parameterOrder": [
+ "name"
+ ],
+ "request": {
+ "$ref": "Animal"
+ },
+ "response": {
+ "$ref": "Animal"
+ }
+ }
+ }
+ },
+ "load": {
+ "methods": {
+ "list": {
+ "path": "load",
+ "id": "zoo.load.list",
+ "httpMethod": "GET",
+ "response": {
+ "$ref": "LoadFeed"
+ }
+ }
+ }
+ },
+ "loadNoTemplate": {
+ "methods": {
+ "list": {
+ "path": "loadNoTemplate",
+ "id": "zoo.loadNoTemplate.list",
+ "httpMethod": "GET"
+ }
+ }
+ },
+ "scopedAnimals": {
+ "methods": {
+ "list": {
+ "path": "scopedanimals",
+ "id": "zoo.scopedAnimals.list",
+ "httpMethod": "GET",
+ "description": "List animals (scoped)",
+ "parameters": {
+ "max-results": {
+ "location": "query",
+ "description": "Maximum number of results to return",
+ "type": "integer",
+ "minimum": "0"
+ },
+ "name": {
+ "location": "query",
+ "description": "Restrict result to animals with this name",
+ "type": "string"
+ },
+ "projection": {
+ "location": "query",
+ "type": "string",
+ "enum": [
+ "full"
+ ],
+ "enumDescriptions": [
+ "Include absolutely everything"
+ ]
+ },
+ "start-token": {
+ "location": "query",
+ "description": "Pagination token",
+ "type": "string"
+ }
+ },
+ "response": {
+ "$ref": "AnimalFeed"
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/auth/storage_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/auth/storage_spec.rb
new file mode 100644
index 0000000000..d8e5b960c7
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/auth/storage_spec.rb
@@ -0,0 +1,122 @@
+require 'spec_helper'
+
+require 'google/api_client'
+require 'google/api_client/version'
+
+describe Google::APIClient::Storage do
+ let(:client) { Google::APIClient.new(:application_name => 'API Client Tests') }
+ let(:root_path) { File.expand_path(File.join(__FILE__, '..', '..', '..')) }
+ let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }
+
+ let(:store) { double }
+ let(:client_stub) { double }
+ subject { Google::APIClient::Storage.new(store) }
+
+ describe 'authorize' do
+ it 'should authorize' do
+ expect(subject).to respond_to(:authorization)
+ expect(subject.store).to be == store
+ end
+ end
+
+ describe 'authorize' do
+ describe 'with credentials' do
+
+ it 'should initialize a new OAuth Client' do
+ expect(subject).to receive(:load_credentials).and_return({:first => 'a dummy'})
+ expect(client_stub).to receive(:issued_at=)
+ expect(client_stub).to receive(:expired?).and_return(false)
+ expect(Signet::OAuth2::Client).to receive(:new).and_return(client_stub)
+ expect(subject).not_to receive(:refresh_authorization)
+ subject.authorize
+ end
+
+ it 'should refresh authorization' do
+ expect(subject).to receive(:load_credentials).and_return({:first => 'a dummy'})
+ expect(client_stub).to receive(:issued_at=)
+ expect(client_stub).to receive(:expired?).and_return(true)
+ expect(Signet::OAuth2::Client).to receive(:new).and_return(client_stub)
+ expect(subject).to receive(:refresh_authorization)
+ auth = subject.authorize
+ expect(auth).to be == subject.authorization
+ expect(auth).not_to be_nil
+ end
+ end
+
+ describe 'without credentials' do
+
+ it 'should return nil' do
+ expect(subject.authorization).to be_nil
+ expect(subject).to receive(:load_credentials).and_return({})
+ expect(subject.authorize).to be_nil
+ expect(subject.authorization).to be_nil
+ end
+ end
+ end
+
+ describe 'write_credentials' do
+ it 'should call store to write credentials' do
+ authorization_stub = double
+ expect(authorization_stub).to receive(:refresh_token).and_return(true)
+ expect(subject).to receive(:credentials_hash)
+ expect(subject.store).to receive(:write_credentials)
+ subject.write_credentials(authorization_stub)
+ expect(subject.authorization).to be == authorization_stub
+ end
+
+ it 'should not call store to write credentials' do
+ expect(subject).not_to receive(:credentials_hash)
+ expect(subject.store).not_to receive(:write_credentials)
+ expect {
+ subject.write_credentials()
+ }.not_to raise_error
+ end
+ it 'should not call store to write credentials' do
+ expect(subject).not_to receive(:credentials_hash)
+ expect(subject.store).not_to receive(:write_credentials)
+ expect {
+ subject.write_credentials('something')
+ }.not_to raise_error
+ end
+
+ end
+
+ describe 'refresh_authorization' do
+ it 'should call refresh and write credentials' do
+ expect(subject).to receive(:write_credentials)
+ authorization_stub = double
+ expect(subject).to receive(:authorization).and_return(authorization_stub)
+ expect(authorization_stub).to receive(:refresh!).and_return(true)
+ subject.refresh_authorization
+ end
+ end
+
+ describe 'load_credentials' do
+ it 'should call store to load credentials' do
+ expect(subject.store).to receive(:load_credentials)
+ subject.send(:load_credentials)
+ end
+ end
+
+ describe 'credentials_hash' do
+ it 'should return an hash' do
+ authorization_stub = double
+ expect(authorization_stub).to receive(:access_token)
+ expect(authorization_stub).to receive(:client_id)
+ expect(authorization_stub).to receive(:client_secret)
+ expect(authorization_stub).to receive(:expires_in)
+ expect(authorization_stub).to receive(:refresh_token)
+ expect(authorization_stub).to receive(:issued_at).and_return('100')
+ allow(subject).to receive(:authorization).and_return(authorization_stub)
+ credentials = subject.send(:credentials_hash)
+ expect(credentials).to include(:access_token)
+ expect(credentials).to include(:authorization_uri)
+ expect(credentials).to include(:client_id)
+ expect(credentials).to include(:client_secret)
+ expect(credentials).to include(:expires_in)
+ expect(credentials).to include(:refresh_token)
+ expect(credentials).to include(:token_credential_uri)
+ expect(credentials).to include(:issued_at)
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/file_store_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/file_store_spec.rb
new file mode 100644
index 0000000000..2963b1d45b
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/file_store_spec.rb
@@ -0,0 +1,40 @@
+require 'spec_helper'
+
+require 'google/api_client'
+require 'google/api_client/version'
+
+describe Google::APIClient::FileStore do
+ let(:root_path) { File.expand_path(File.join(__FILE__, '..','..','..', '..','..')) }
+ let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }
+
+ let(:credentials_hash) {{
+ "access_token"=>"my_access_token",
+ "authorization_uri"=>"https://accounts.google.com/o/oauth2/auth",
+ "client_id"=>"123456_test_client_id@.apps.googleusercontent.com",
+ "client_secret"=>"123456_client_secret",
+ "expires_in"=>3600,
+ "refresh_token"=>"my_refresh_token",
+ "token_credential_uri"=>"https://accounts.google.com/o/oauth2/token",
+ "issued_at"=>1384440275
+ }}
+
+ subject{Google::APIClient::FileStore.new('a file path')}
+
+ it 'should have a path' do
+ expect(subject.path).to be == 'a file path'
+ subject.path = 'an other file path'
+ expect(subject.path).to be == 'an other file path'
+ end
+
+ it 'should load credentials' do
+ subject.path = json_file
+ credentials = subject.load_credentials
+ expect(credentials).to include('access_token', 'authorization_uri', 'refresh_token')
+ end
+
+ it 'should write credentials' do
+ io_stub = StringIO.new
+ expect(subject).to receive(:open).and_return(io_stub)
+ subject.write_credentials(credentials_hash)
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/redis_store_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/redis_store_spec.rb
new file mode 100644
index 0000000000..de5abc4a10
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/auth/storages/redis_store_spec.rb
@@ -0,0 +1,70 @@
+require 'spec_helper'
+
+require 'google/api_client'
+require 'google/api_client/version'
+
+
+describe Google::APIClient::RedisStore do
+ let(:root_path) { File.expand_path(File.join(__FILE__, '..', '..', '..', '..', '..')) }
+ let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }
+ let(:redis) {double}
+
+ let(:credentials_hash) { {
+ "access_token" => "my_access_token",
+ "authorization_uri" => "https://accounts.google.com/o/oauth2/auth",
+ "client_id" => "123456_test_client_id@.apps.googleusercontent.com",
+ "client_secret" => "123456_client_secret",
+ "expires_in" => 3600,
+ "refresh_token" => "my_refresh_token",
+ "token_credential_uri" => "https://accounts.google.com/o/oauth2/token",
+ "issued_at" => 1384440275
+ } }
+
+ subject { Google::APIClient::RedisStore.new('a redis instance') }
+
+ it 'should have a redis instance' do
+ expect(subject.redis).to be == 'a redis instance'
+ subject.redis = 'an other redis instance'
+ expect(subject.redis).to be == 'an other redis instance'
+ end
+
+ describe 'load_credentials' do
+
+ it 'should load credentials' do
+ subject.redis= redis
+ expect(redis).to receive(:get).and_return(credentials_hash.to_json)
+ expect(subject.load_credentials).to be == credentials_hash
+ end
+
+ it 'should return nil' do
+ subject.redis= redis
+ expect(redis).to receive(:get).and_return(nil)
+ expect(subject.load_credentials).to be_nil
+ end
+ end
+
+ describe 'redis_credentials_key' do
+ context 'without given key' do
+ it 'should return default key' do
+ expect(subject.redis_credentials_key).to be == "google_api_credentials"
+ end
+ end
+ context 'with given key' do
+ let(:redis_store) { Google::APIClient::RedisStore.new('a redis instance', 'another_google_api_credentials') }
+ it 'should use given key' do
+ expect(redis_store.redis_credentials_key).to be == "another_google_api_credentials"
+ end
+ end
+
+ end
+
+ describe 'write credentials' do
+
+ it 'should write credentials' do
+ subject.redis= redis
+ expect(redis).to receive(:set).and_return('ok')
+ expect(subject.write_credentials(credentials_hash)).to be_truthy
+ end
+ end
+
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/batch_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/batch_spec.rb
new file mode 100644
index 0000000000..3aa95a88b6
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/batch_spec.rb
@@ -0,0 +1,248 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+require 'google/api_client'
+
+RSpec.describe Google::APIClient::BatchRequest do
+ CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)
+
+ after do
+ # Reset client to not-quite-pristine state
+ CLIENT.key = nil
+ CLIENT.user_ip = nil
+ end
+
+ it 'should raise an error if making an empty batch request' do
+ batch = Google::APIClient::BatchRequest.new
+
+ expect(lambda do
+ CLIENT.execute(batch)
+ end).to raise_error(Google::APIClient::BatchError)
+ end
+
+ it 'should allow query parameters in batch requests' do
+ batch = Google::APIClient::BatchRequest.new
+ batch.add(:uri => 'https://example.com', :parameters => {
+ 'a' => '12345'
+ })
+ method, uri, headers, body = batch.to_http_request
+ expect(body.read).to include("/?a=12345")
+ end
+
+ describe 'with the discovery API' do
+ before do
+ CLIENT.authorization = nil
+ @discovery = CLIENT.discovered_api('discovery', 'v1')
+ end
+
+ describe 'with two valid requests' do
+ before do
+ @call1 = {
+ :api_method => @discovery.apis.get_rest,
+ :parameters => {
+ 'api' => 'plus',
+ 'version' => 'v1'
+ }
+ }
+
+ @call2 = {
+ :api_method => @discovery.apis.get_rest,
+ :parameters => {
+ 'api' => 'discovery',
+ 'version' => 'v1'
+ }
+ }
+ end
+
+ it 'should execute both when using a global callback' do
+ block_called = 0
+ ids = ['first_call', 'second_call']
+ expected_ids = ids.clone
+ batch = Google::APIClient::BatchRequest.new do |result|
+ block_called += 1
+ expect(result.status).to eq(200)
+ expect(expected_ids).to include(result.response.call_id)
+ expected_ids.delete(result.response.call_id)
+ end
+
+ batch.add(@call1, ids[0])
+ batch.add(@call2, ids[1])
+
+ CLIENT.execute(batch)
+ expect(block_called).to eq(2)
+ end
+
+ it 'should execute both when using individual callbacks' do
+ batch = Google::APIClient::BatchRequest.new
+
+ call1_returned, call2_returned = false, false
+ batch.add(@call1) do |result|
+ call1_returned = true
+ expect(result.status).to eq(200)
+ end
+ batch.add(@call2) do |result|
+ call2_returned = true
+ expect(result.status).to eq(200)
+ end
+
+ CLIENT.execute(batch)
+ expect(call1_returned).to be_truthy
+ expect(call2_returned).to be_truthy
+ end
+
+ it 'should raise an error if using the same call ID more than once' do
+ batch = Google::APIClient::BatchRequest.new
+
+ expect(lambda do
+ batch.add(@call1, 'my_id')
+ batch.add(@call2, 'my_id')
+ end).to raise_error(Google::APIClient::BatchError)
+ end
+ end
+
+ describe 'with a valid request and an invalid one' do
+ before do
+ @call1 = {
+ :api_method => @discovery.apis.get_rest,
+ :parameters => {
+ 'api' => 'plus',
+ 'version' => 'v1'
+ }
+ }
+
+ @call2 = {
+ :api_method => @discovery.apis.get_rest,
+ :parameters => {
+ 'api' => 0,
+ 'version' => 1
+ }
+ }
+ end
+
+ it 'should execute both when using a global callback' do
+ block_called = 0
+ ids = ['first_call', 'second_call']
+ expected_ids = ids.clone
+ batch = Google::APIClient::BatchRequest.new do |result|
+ block_called += 1
+ expect(expected_ids).to include(result.response.call_id)
+ expected_ids.delete(result.response.call_id)
+ if result.response.call_id == ids[0]
+ expect(result.status).to eq(200)
+ else
+ expect(result.status).to be >= 400
+ expect(result.status).to be < 500
+ end
+ end
+
+ batch.add(@call1, ids[0])
+ batch.add(@call2, ids[1])
+
+ CLIENT.execute(batch)
+ expect(block_called).to eq(2)
+ end
+
+ it 'should execute both when using individual callbacks' do
+ batch = Google::APIClient::BatchRequest.new
+
+ call1_returned, call2_returned = false, false
+ batch.add(@call1) do |result|
+ call1_returned = true
+ expect(result.status).to eq(200)
+ end
+ batch.add(@call2) do |result|
+ call2_returned = true
+ expect(result.status).to be >= 400
+ expect(result.status).to be < 500
+ end
+
+ CLIENT.execute(batch)
+ expect(call1_returned).to be_truthy
+ expect(call2_returned).to be_truthy
+ end
+ end
+ end
+
+ describe 'with the calendar API' do
+ before do
+ CLIENT.authorization = nil
+ @calendar = CLIENT.discovered_api('calendar', 'v3')
+ end
+
+ describe 'with two valid requests' do
+ before do
+ event1 = {
+ 'summary' => 'Appointment 1',
+ 'location' => 'Somewhere',
+ 'start' => {
+ 'dateTime' => '2011-01-01T10:00:00.000-07:00'
+ },
+ 'end' => {
+ 'dateTime' => '2011-01-01T10:25:00.000-07:00'
+ },
+ 'attendees' => [
+ {
+ 'email' => 'myemail@mydomain.tld'
+ }
+ ]
+ }
+
+ event2 = {
+ 'summary' => 'Appointment 2',
+ 'location' => 'Somewhere as well',
+ 'start' => {
+ 'dateTime' => '2011-01-02T10:00:00.000-07:00'
+ },
+ 'end' => {
+ 'dateTime' => '2011-01-02T10:25:00.000-07:00'
+ },
+ 'attendees' => [
+ {
+ 'email' => 'myemail@mydomain.tld'
+ }
+ ]
+ }
+
+ @call1 = {
+ :api_method => @calendar.events.insert,
+ :parameters => {'calendarId' => 'myemail@mydomain.tld'},
+ :body => MultiJson.dump(event1),
+ :headers => {'Content-Type' => 'application/json'}
+ }
+
+ @call2 = {
+ :api_method => @calendar.events.insert,
+ :parameters => {'calendarId' => 'myemail@mydomain.tld'},
+ :body => MultiJson.dump(event2),
+ :headers => {'Content-Type' => 'application/json'}
+ }
+ end
+
+ it 'should convert to a correct HTTP request' do
+ batch = Google::APIClient::BatchRequest.new { |result| }
+ batch.add(@call1, '1').add(@call2, '2')
+ request = batch.to_env(CLIENT.connection)
+ boundary = Google::APIClient::BatchRequest::BATCH_BOUNDARY
+ expect(request[:method].to_s.downcase).to eq('post')
+ expect(request[:url].to_s).to eq('https://www.googleapis.com/batch')
+ expect(request[:request_headers]['Content-Type']).to eq("multipart/mixed;boundary=#{boundary}")
+ body = request[:body].read
+ expect(body).to include(@call1[:body])
+ expect(body).to include(@call2[:body])
+ end
+ end
+
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/client_secrets_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/client_secrets_spec.rb
new file mode 100644
index 0000000000..ead9bf7e99
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/client_secrets_spec.rb
@@ -0,0 +1,53 @@
+# encoding:utf-8
+
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client/client_secrets'
+
+FIXTURES_PATH = File.expand_path('../../../fixtures', __FILE__)
+
+RSpec.describe Google::APIClient::ClientSecrets do
+
+ context 'with JSON file' do
+ let(:file) { File.join(FIXTURES_PATH, 'files', 'client_secrets.json') }
+ subject(:secrets) { Google::APIClient::ClientSecrets.load(file)}
+
+ it 'should load the correct client ID' do
+ expect(secrets.client_id).to be == '898243283568.apps.googleusercontent.com'
+ end
+
+ it 'should load the correct client secret' do
+ expect(secrets.client_secret).to be == 'i8YaXdGgiQ4_KrTVNGsB7QP1'
+ end
+
+ context 'serialzed to hash' do
+ subject(:hash) { secrets.to_hash }
+ it 'should contain the flow as the first key' do
+ expect(hash).to have_key "installed"
+ end
+
+ it 'should contain the client ID' do
+ expect(hash["installed"]["client_id"]).to be == '898243283568.apps.googleusercontent.com'
+ end
+
+ it 'should contain the client secret' do
+ expect(hash["installed"]["client_secret"]).to be == 'i8YaXdGgiQ4_KrTVNGsB7QP1'
+ end
+
+ end
+ end
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/discovery_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/discovery_spec.rb
new file mode 100644
index 0000000000..d596538cca
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/discovery_spec.rb
@@ -0,0 +1,708 @@
+# encoding:utf-8
+
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+require 'spec_helper'
+
+require 'faraday'
+require 'multi_json'
+require 'compat/multi_json'
+require 'signet/oauth_1/client'
+require 'google/api_client'
+
+fixtures_path = File.expand_path('../../../fixtures', __FILE__)
+
+RSpec.describe Google::APIClient do
+ include ConnectionHelpers
+ CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)
+
+ after do
+ # Reset client to not-quite-pristine state
+ CLIENT.key = nil
+ CLIENT.user_ip = nil
+ end
+
+ it 'should raise a type error for bogus authorization' do
+ expect(lambda do
+ Google::APIClient.new(:application_name => 'API Client Tests', :authorization => 42)
+ end).to raise_error(TypeError)
+ end
+
+ it 'should not be able to retrieve the discovery document for a bogus API' do
+ expect(lambda do
+ CLIENT.discovery_document('bogus')
+ end).to raise_error(Google::APIClient::TransmissionError)
+ expect(lambda do
+ CLIENT.discovered_api('bogus')
+ end).to raise_error(Google::APIClient::TransmissionError)
+ end
+
+ it 'should raise an error for bogus services' do
+ expect(lambda do
+ CLIENT.discovered_api(42)
+ end).to raise_error(TypeError)
+ end
+
+ it 'should raise an error for bogus services' do
+ expect(lambda do
+ CLIENT.preferred_version(42)
+ end).to raise_error(TypeError)
+ end
+
+ it 'should raise an error for bogus methods' do
+ expect(lambda do
+ CLIENT.execute(42)
+ end).to raise_error(TypeError)
+ end
+
+ it 'should not return a preferred version for bogus service names' do
+ expect(CLIENT.preferred_version('bogus')).to eq(nil)
+ end
+
+ describe 'with zoo API' do
+ it 'should return API instance registered from file' do
+ zoo_json = File.join(fixtures_path, 'files', 'zoo.json')
+ contents = File.open(zoo_json, 'rb') { |io| io.read }
+ api = CLIENT.register_discovery_document('zoo', 'v1', contents)
+ expect(api).to be_kind_of(Google::APIClient::API)
+ end
+ end
+
+ describe 'with the prediction API' do
+ before do
+ CLIENT.authorization = nil
+ # The prediction API no longer exposes a v1, so we have to be
+ # careful about looking up the wrong API version.
+ @prediction = CLIENT.discovered_api('prediction', 'v1.2')
+ end
+
+ it 'should correctly determine the discovery URI' do
+ expect(CLIENT.discovery_uri('prediction')).to be ===
+ 'https://www.googleapis.com/discovery/v1/apis/prediction/v1/rest'
+ end
+
+ it 'should correctly determine the discovery URI if :user_ip is set' do
+ CLIENT.user_ip = '127.0.0.1'
+
+ conn = stub_connection do |stub|
+ stub.get('/discovery/v1/apis/prediction/v1.2/rest?userIp=127.0.0.1') do |env|
+ [200, {}, '{}']
+ end
+ end
+ CLIENT.execute(
+ :http_method => 'GET',
+ :uri => CLIENT.discovery_uri('prediction', 'v1.2'),
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should correctly determine the discovery URI if :key is set' do
+ CLIENT.key = 'qwerty'
+ conn = stub_connection do |stub|
+ stub.get('/discovery/v1/apis/prediction/v1.2/rest?key=qwerty') do |env|
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :http_method => 'GET',
+ :uri => CLIENT.discovery_uri('prediction', 'v1.2'),
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should correctly determine the discovery URI if both are set' do
+ CLIENT.key = 'qwerty'
+ CLIENT.user_ip = '127.0.0.1'
+ conn = stub_connection do |stub|
+ stub.get('/discovery/v1/apis/prediction/v1.2/rest?key=qwerty&userIp=127.0.0.1') do |env|
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :http_method => 'GET',
+ :uri => CLIENT.discovery_uri('prediction', 'v1.2'),
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should correctly generate API objects' do
+ expect(CLIENT.discovered_api('prediction', 'v1.2').name).to eq('prediction')
+ expect(CLIENT.discovered_api('prediction', 'v1.2').version).to eq('v1.2')
+ expect(CLIENT.discovered_api(:prediction, 'v1.2').name).to eq('prediction')
+ expect(CLIENT.discovered_api(:prediction, 'v1.2').version).to eq('v1.2')
+ end
+
+ it 'should discover methods' do
+ expect(CLIENT.discovered_method(
+ 'prediction.training.insert', 'prediction', 'v1.2'
+ ).name).to eq('insert')
+ expect(CLIENT.discovered_method(
+ :'prediction.training.insert', :prediction, 'v1.2'
+ ).name).to eq('insert')
+ expect(CLIENT.discovered_method(
+ 'prediction.training.delete', 'prediction', 'v1.2'
+ ).name).to eq('delete')
+ end
+
+ it 'should define the origin API in discovered methods' do
+ expect(CLIENT.discovered_method(
+ 'prediction.training.insert', 'prediction', 'v1.2'
+ ).api.name).to eq('prediction')
+ end
+
+ it 'should not find methods that are not in the discovery document' do
+ expect(CLIENT.discovered_method(
+ 'prediction.bogus', 'prediction', 'v1.2'
+ )).to eq(nil)
+ end
+
+ it 'should raise an error for bogus methods' do
+ expect(lambda do
+ CLIENT.discovered_method(42, 'prediction', 'v1.2')
+ end).to raise_error(TypeError)
+ end
+
+ it 'should raise an error for bogus methods' do
+ expect(lambda do
+ CLIENT.execute(:api_method => CLIENT.discovered_api('prediction', 'v1.2'))
+ end).to raise_error(TypeError)
+ end
+
+ it 'should correctly determine the preferred version' do
+ expect(CLIENT.preferred_version('prediction').version).not_to eq('v1')
+ expect(CLIENT.preferred_version(:prediction).version).not_to eq('v1')
+ end
+
+ it 'should return a batch path' do
+ expect(CLIENT.discovered_api('prediction', 'v1.2').batch_path).not_to be_nil
+ end
+
+ it 'should generate valid requests' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ expect(env[:body]).to eq('')
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate valid requests when parameter value includes semicolon' do
+ conn = stub_connection do |stub|
+ # semicolon (;) in parameter value was being converted to
+ # bare ampersand (&) in 0.4.7. ensure that it gets converted
+ # to a CGI-escaped semicolon (%3B) instead.
+ stub.post('/prediction/v1.2/training?data=12345%3B67890') do |env|
+ expect(env[:body]).to eq('')
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345;67890'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate valid requests when multivalued parameters are passed' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=1&data=2') do |env|
+ expect(env.params['data']).to include('1', '2')
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => ['1', '2']},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate requests against the correct URIs' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate requests against the correct URIs' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should allow modification to the base URIs for testing purposes' do
+ # Using a new client instance here to avoid caching rebased discovery doc
+ prediction_rebase =
+ Google::APIClient.new(:application_name => 'API Client Tests').discovered_api('prediction', 'v1.2')
+ prediction_rebase.method_base =
+ 'https://testing-domain.example.com/prediction/v1.2/'
+
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training') do |env|
+ expect(env[:url].host).to eq('testing-domain.example.com')
+ [200, {}, '{}']
+ end
+ end
+
+ request = CLIENT.execute(
+ :api_method => prediction_rebase.training.insert,
+ :parameters => {'data' => '123'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate OAuth 1 requests' do
+ CLIENT.authorization = :oauth_1
+ CLIENT.authorization.token_credential_key = '12345'
+ CLIENT.authorization.token_credential_secret = '12345'
+
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ expect(env[:request_headers]).to have_key('Authorization')
+ expect(env[:request_headers]['Authorization']).to match(/^OAuth/)
+ [200, {}, '{}']
+ end
+ end
+
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should generate OAuth 2 requests' do
+ CLIENT.authorization = :oauth_2
+ CLIENT.authorization.access_token = '12345'
+
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ expect(env[:request_headers]).to have_key('Authorization')
+ expect(env[:request_headers]['Authorization']).to match(/^Bearer/)
+ [200, {}, '{}']
+ end
+ end
+
+ request = CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should not be able to execute improperly authorized requests' do
+ CLIENT.authorization = :oauth_1
+ CLIENT.authorization.token_credential_key = '12345'
+ CLIENT.authorization.token_credential_secret = '12345'
+ result = CLIENT.execute(
+ @prediction.training.insert,
+ {'data' => '12345'}
+ )
+ expect(result.response.status).to eq(401)
+ end
+
+ it 'should not be able to execute improperly authorized requests' do
+ CLIENT.authorization = :oauth_2
+ CLIENT.authorization.access_token = '12345'
+ result = CLIENT.execute(
+ @prediction.training.insert,
+ {'data' => '12345'}
+ )
+ expect(result.response.status).to eq(401)
+ end
+
+ it 'should not be able to execute improperly authorized requests' do
+ expect(lambda do
+ CLIENT.authorization = :oauth_1
+ CLIENT.authorization.token_credential_key = '12345'
+ CLIENT.authorization.token_credential_secret = '12345'
+ result = CLIENT.execute!(
+ @prediction.training.insert,
+ {'data' => '12345'}
+ )
+ end).to raise_error(Google::APIClient::ClientError)
+ end
+
+ it 'should not be able to execute improperly authorized requests' do
+ expect(lambda do
+ CLIENT.authorization = :oauth_2
+ CLIENT.authorization.access_token = '12345'
+ result = CLIENT.execute!(
+ @prediction.training.insert,
+ {'data' => '12345'}
+ )
+ end).to raise_error(Google::APIClient::ClientError)
+ end
+
+ it 'should correctly handle unnamed parameters' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training') do |env|
+ expect(env[:request_headers]).to have_key('Content-Type')
+ expect(env[:request_headers]['Content-Type']).to eq('application/json')
+ [200, {}, '{}']
+ end
+ end
+ CLIENT.authorization = :oauth_2
+ CLIENT.authorization.access_token = '12345'
+ CLIENT.execute(
+ :api_method => @prediction.training.insert,
+ :body => MultiJson.dump({"id" => "bucket/object"}),
+ :headers => {'Content-Type' => 'application/json'},
+ :connection => conn
+ )
+ conn.verify
+ end
+ end
+
+ describe 'with the plus API' do
+ before do
+ CLIENT.authorization = nil
+ @plus = CLIENT.discovered_api('plus')
+ end
+
+ it 'should correctly determine the discovery URI' do
+ expect(CLIENT.discovery_uri('plus')).to be ===
+ 'https://www.googleapis.com/discovery/v1/apis/plus/v1/rest'
+ end
+
+ it 'should find APIs that are in the discovery document' do
+ expect(CLIENT.discovered_api('plus').name).to eq('plus')
+ expect(CLIENT.discovered_api('plus').version).to eq('v1')
+ expect(CLIENT.discovered_api(:plus).name).to eq('plus')
+ expect(CLIENT.discovered_api(:plus).version).to eq('v1')
+ end
+
+ it 'should find methods that are in the discovery document' do
+ # TODO(bobaman) Fix this when the RPC names are correct
+ expect(CLIENT.discovered_method(
+ 'plus.activities.list', 'plus'
+ ).name).to eq('list')
+ end
+
+ it 'should define the origin API in discovered methods' do
+ expect(CLIENT.discovered_method(
+ 'plus.activities.list', 'plus'
+ ).api.name).to eq('plus')
+ end
+
+ it 'should not find methods that are not in the discovery document' do
+ expect(CLIENT.discovered_method('plus.bogus', 'plus')).to eq(nil)
+ end
+
+ it 'should generate requests against the correct URIs' do
+ conn = stub_connection do |stub|
+ stub.get('/plus/v1/people/107807692475771887386/activities/public') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ request = CLIENT.execute(
+ :api_method => @plus.activities.list,
+ :parameters => {
+ 'userId' => '107807692475771887386', 'collection' => 'public'
+ },
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should correctly validate parameters' do
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @plus.activities.list,
+ :parameters => {'alt' => 'json'},
+ :authenticated => false
+ )
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should correctly validate parameters' do
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @plus.activities.list,
+ :parameters => {
+ 'userId' => '107807692475771887386', 'collection' => 'bogus'
+ },
+ :authenticated => false
+ ).to_env(CLIENT.connection)
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should correctly determine the service root_uri' do
+ expect(@plus.root_uri.to_s).to eq('https://www.googleapis.com/')
+ end
+ end
+
+ describe 'with the adsense API' do
+ before do
+ CLIENT.authorization = nil
+ @adsense = CLIENT.discovered_api('adsense', 'v1.3')
+ end
+
+ it 'should correctly determine the discovery URI' do
+ expect(CLIENT.discovery_uri('adsense', 'v1.3').to_s).to be ===
+ 'https://www.googleapis.com/discovery/v1/apis/adsense/v1.3/rest'
+ end
+
+ it 'should find APIs that are in the discovery document' do
+ expect(CLIENT.discovered_api('adsense', 'v1.3').name).to eq('adsense')
+ expect(CLIENT.discovered_api('adsense', 'v1.3').version).to eq('v1.3')
+ end
+
+ it 'should return a batch path' do
+ expect(CLIENT.discovered_api('adsense', 'v1.3').batch_path).not_to be_nil
+ end
+
+ it 'should find methods that are in the discovery document' do
+ expect(CLIENT.discovered_method(
+ 'adsense.reports.generate', 'adsense', 'v1.3'
+ ).name).to eq('generate')
+ end
+
+ it 'should not find methods that are not in the discovery document' do
+ expect(CLIENT.discovered_method('adsense.bogus', 'adsense', 'v1.3')).to eq(nil)
+ end
+
+ it 'should generate requests against the correct URIs' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/adclients') do |env|
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @adsense.adclients.list,
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should not be able to execute requests without authorization' do
+ result = CLIENT.execute(
+ :api_method => @adsense.adclients.list,
+ :authenticated => false
+ )
+ expect(result.response.status).to eq(401)
+ end
+
+ it 'should fail when validating missing required parameters' do
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :authenticated => false
+ )
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should succeed when validating parameters in a correct call' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/reports?dimension=DATE&endDate=2010-01-01&metric=PAGE_VIEWS&startDate=2000-01-01') do |env|
+ [200, {}, '{}']
+ end
+ end
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :parameters => {
+ 'startDate' => '2000-01-01',
+ 'endDate' => '2010-01-01',
+ 'dimension' => 'DATE',
+ 'metric' => 'PAGE_VIEWS'
+ },
+ :authenticated => false,
+ :connection => conn
+ )
+ end).not_to raise_error
+ conn.verify
+ end
+
+ it 'should fail when validating parameters with invalid values' do
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :parameters => {
+ 'startDate' => '2000-01-01',
+ 'endDate' => '2010-01-01',
+ 'dimension' => 'BAD_CHARACTERS=-&*(£&',
+ 'metric' => 'PAGE_VIEWS'
+ },
+ :authenticated => false
+ )
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should succeed when validating repeated parameters in a correct call' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/reports?dimension=DATE&dimension=PRODUCT_CODE'+
+ '&endDate=2010-01-01&metric=CLICKS&metric=PAGE_VIEWS&'+
+ 'startDate=2000-01-01') do |env|
+ [200, {}, '{}']
+ end
+ end
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :parameters => {
+ 'startDate' => '2000-01-01',
+ 'endDate' => '2010-01-01',
+ 'dimension' => ['DATE', 'PRODUCT_CODE'],
+ 'metric' => ['PAGE_VIEWS', 'CLICKS']
+ },
+ :authenticated => false,
+ :connection => conn
+ )
+ end).not_to raise_error
+ conn.verify
+ end
+
+ it 'should fail when validating incorrect repeated parameters' do
+ expect(lambda do
+ CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :parameters => {
+ 'startDate' => '2000-01-01',
+ 'endDate' => '2010-01-01',
+ 'dimension' => ['DATE', 'BAD_CHARACTERS=-&*(£&'],
+ 'metric' => ['PAGE_VIEWS', 'CLICKS']
+ },
+ :authenticated => false
+ )
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should generate valid requests when multivalued parameters are passed' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/reports?dimension=DATE&dimension=PRODUCT_CODE'+
+ '&endDate=2010-01-01&metric=CLICKS&metric=PAGE_VIEWS&'+
+ 'startDate=2000-01-01') do |env|
+ expect(env.params['dimension']).to include('DATE', 'PRODUCT_CODE')
+ expect(env.params['metric']).to include('CLICKS', 'PAGE_VIEWS')
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @adsense.reports.generate,
+ :parameters => {
+ 'startDate' => '2000-01-01',
+ 'endDate' => '2010-01-01',
+ 'dimension' => ['DATE', 'PRODUCT_CODE'],
+ 'metric' => ['PAGE_VIEWS', 'CLICKS']
+ },
+ :authenticated => false,
+ :connection => conn
+ )
+ conn.verify
+ end
+ end
+
+ describe 'with the Drive API' do
+ before do
+ CLIENT.authorization = nil
+ @drive = CLIENT.discovered_api('drive', 'v2')
+ end
+
+ it 'should include media upload info methods' do
+ expect(@drive.files.insert.media_upload).not_to eq(nil)
+ end
+
+ it 'should include accepted media types' do
+ expect(@drive.files.insert.media_upload.accepted_types).not_to be_empty
+ end
+
+ it 'should have an upload path' do
+ expect(@drive.files.insert.media_upload.uri_template).not_to eq(nil)
+ end
+
+ it 'should have a max file size' do
+ expect(@drive.files.insert.media_upload.max_size).not_to eq(nil)
+ end
+ end
+
+ describe 'with the Pub/Sub API' do
+ before do
+ CLIENT.authorization = nil
+ @pubsub = CLIENT.discovered_api('pubsub', 'v1beta2')
+ end
+
+ it 'should generate requests against the correct URIs' do
+ conn = stub_connection do |stub|
+ stub.get('/v1beta2/projects/12345/topics') do |env|
+ expect(env[:url].host).to eq('pubsub.googleapis.com')
+ [200, {}, '{}']
+ end
+ end
+ request = CLIENT.execute(
+ :api_method => @pubsub.projects.topics.list,
+ :parameters => {'project' => 'projects/12345'},
+ :connection => conn
+ )
+ conn.verify
+ end
+
+ it 'should correctly determine the service root_uri' do
+ expect(@pubsub.root_uri.to_s).to eq('https://pubsub.googleapis.com/')
+ end
+
+ it 'should discover correct method URIs' do
+ list = CLIENT.discovered_method(
+ "pubsub.projects.topics.list", "pubsub", "v1beta2"
+ )
+ expect(list.uri_template.pattern).to eq(
+ "https://pubsub.googleapis.com/v1beta2/{+project}/topics"
+ )
+
+ publish = CLIENT.discovered_method(
+ "pubsub.projects.topics.publish", "pubsub", "v1beta2"
+ )
+ expect(publish.uri_template.pattern).to eq(
+ "https://pubsub.googleapis.com/v1beta2/{+topic}:publish"
+ )
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/gzip_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/gzip_spec.rb
new file mode 100644
index 0000000000..0539b97d93
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/gzip_spec.rb
@@ -0,0 +1,98 @@
+# Encoding: utf-8
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+
+RSpec.describe Google::APIClient::Gzip do
+
+ def create_connection(&block)
+ Faraday.new do |b|
+ b.response :charset
+ b.response :gzip
+ b.adapter :test do |stub|
+ stub.get '/', &block
+ end
+ end
+ end
+
+ it 'should ignore non-zipped content' do
+ conn = create_connection do |env|
+ [200, {}, 'Hello world']
+ end
+ result = conn.get('/')
+ expect(result.body).to eq("Hello world")
+ end
+
+ it 'should decompress gziped content' do
+ conn = create_connection do |env|
+ [200, { 'Content-Encoding' => 'gzip'}, Base64.decode64('H4sICLVGwlEAA3RtcADzSM3JyVcozy/KSeECANXgObcMAAAA')]
+ end
+ result = conn.get('/')
+ expect(result.body).to eq("Hello world\n")
+ end
+
+ it 'should inflate with the correct charset encoding' do
+ conn = create_connection do |env|
+ [200,
+ { 'Content-Encoding' => 'deflate', 'Content-Type' => 'application/json;charset=BIG5'},
+ Base64.decode64('eJxb8nLp7t2VAA8fBCI=')]
+ end
+ result = conn.get('/')
+ expect(result.body.encoding).to eq(Encoding::BIG5)
+ expect(result.body).to eq('æ¥æ¬èª'.encode("BIG5"))
+ end
+
+ describe 'with API Client' do
+
+ before do
+ @client = Google::APIClient.new(:application_name => 'test')
+ @client.authorization = nil
+ end
+
+
+ it 'should send gzip in user agent' do
+ conn = create_connection do |env|
+ agent = env[:request_headers]['User-Agent']
+ expect(agent).not_to be_nil
+ expect(agent).to include 'gzip'
+ [200, {}, 'Hello world']
+ end
+ @client.execute(:uri => 'http://www.example.com/', :connection => conn)
+ end
+
+ it 'should send gzip in accept-encoding' do
+ conn = create_connection do |env|
+ encoding = env[:request_headers]['Accept-Encoding']
+ expect(encoding).not_to be_nil
+ expect(encoding).to include 'gzip'
+ [200, {}, 'Hello world']
+ end
+ @client.execute(:uri => 'http://www.example.com/', :connection => conn)
+ end
+
+ it 'should not send gzip in accept-encoding if disabled for request' do
+ conn = create_connection do |env|
+ encoding = env[:request_headers]['Accept-Encoding']
+ expect(encoding).not_to include('gzip') unless encoding.nil?
+ [200, {}, 'Hello world']
+ end
+ response = @client.execute(:uri => 'http://www.example.com/', :gzip => false, :connection => conn)
+ puts response.status
+ end
+
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/media_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/media_spec.rb
new file mode 100644
index 0000000000..944981b187
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/media_spec.rb
@@ -0,0 +1,178 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+
+fixtures_path = File.expand_path('../../../fixtures', __FILE__)
+
+RSpec.describe Google::APIClient::UploadIO do
+ it 'should reject invalid file paths' do
+ expect(lambda do
+ media = Google::APIClient::UploadIO.new('doesnotexist', 'text/plain')
+ end).to raise_error
+ end
+
+ describe 'with a file' do
+ before do
+ @file = File.expand_path('files/sample.txt', fixtures_path)
+ @media = Google::APIClient::UploadIO.new(@file, 'text/plain')
+ end
+
+ it 'should report the correct file length' do
+ expect(@media.length).to eq(File.size(@file))
+ end
+
+ it 'should have a mime type' do
+ expect(@media.content_type).to eq('text/plain')
+ end
+ end
+
+ describe 'with StringIO' do
+ before do
+ @content = "hello world"
+ @media = Google::APIClient::UploadIO.new(StringIO.new(@content), 'text/plain', 'test.txt')
+ end
+
+ it 'should report the correct file length' do
+ expect(@media.length).to eq(@content.length)
+ end
+
+ it 'should have a mime type' do
+ expect(@media.content_type).to eq('text/plain')
+ end
+ end
+end
+
+RSpec.describe Google::APIClient::RangedIO do
+ before do
+ @source = StringIO.new("1234567890abcdef")
+ @io = Google::APIClient::RangedIO.new(@source, 1, 5)
+ end
+
+ it 'should return the correct range when read entirely' do
+ expect(@io.read).to eq("23456")
+ end
+
+ it 'should maintain position' do
+ expect(@io.read(1)).to eq('2')
+ expect(@io.read(2)).to eq('34')
+ expect(@io.read(2)).to eq('56')
+ end
+
+ it 'should allow rewinds' do
+ expect(@io.read(2)).to eq('23')
+ @io.rewind()
+ expect(@io.read(2)).to eq('23')
+ end
+
+ it 'should allow setting position' do
+ @io.pos = 3
+ expect(@io.read).to eq('56')
+ end
+
+ it 'should not allow position to be set beyond range' do
+ @io.pos = 10
+ expect(@io.read).to eq('')
+ end
+
+ it 'should return empty string when read amount is zero' do
+ expect(@io.read(0)).to eq('')
+ end
+
+ it 'should return empty string at EOF if amount is nil' do
+ @io.read
+ expect(@io.read).to eq('')
+ end
+
+ it 'should return nil at EOF if amount is positive int' do
+ @io.read
+ expect(@io.read(1)).to eq(nil)
+ end
+
+end
+
+RSpec.describe Google::APIClient::ResumableUpload do
+ CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)
+
+ after do
+ # Reset client to not-quite-pristine state
+ CLIENT.key = nil
+ CLIENT.user_ip = nil
+ end
+
+ before do
+ @drive = CLIENT.discovered_api('drive', 'v2')
+ @file = File.expand_path('files/sample.txt', fixtures_path)
+ @media = Google::APIClient::UploadIO.new(@file, 'text/plain')
+ @uploader = Google::APIClient::ResumableUpload.new(
+ :media => @media,
+ :api_method => @drive.files.insert,
+ :uri => 'https://www.googleapis.com/upload/drive/v1/files/12345')
+ end
+
+ it 'should consider 20x status as complete' do
+ request = @uploader.to_http_request
+ @uploader.process_http_response(mock_result(200))
+ expect(@uploader.complete?).to eq(true)
+ end
+
+ it 'should consider 30x status as incomplete' do
+ request = @uploader.to_http_request
+ @uploader.process_http_response(mock_result(308))
+ expect(@uploader.complete?).to eq(false)
+ expect(@uploader.expired?).to eq(false)
+ end
+
+ it 'should consider 40x status as fatal' do
+ request = @uploader.to_http_request
+ @uploader.process_http_response(mock_result(404))
+ expect(@uploader.expired?).to eq(true)
+ end
+
+ it 'should detect changes to location' do
+ request = @uploader.to_http_request
+ @uploader.process_http_response(mock_result(308, 'location' => 'https://www.googleapis.com/upload/drive/v1/files/abcdef'))
+ expect(@uploader.uri.to_s).to eq('https://www.googleapis.com/upload/drive/v1/files/abcdef')
+ end
+
+ it 'should resume from the saved range reported by the server' do
+ @uploader.chunk_size = 200
+ @uploader.to_http_request # Send bytes 0-199, only 0-99 saved
+ @uploader.process_http_response(mock_result(308, 'range' => '0-99'))
+ method, url, headers, body = @uploader.to_http_request # Send bytes 100-299
+ expect(headers['Content-Range']).to eq("bytes 100-299/#{@media.length}")
+ expect(headers['Content-length']).to eq("200")
+ end
+
+ it 'should resync the offset after 5xx errors' do
+ @uploader.chunk_size = 200
+ @uploader.to_http_request
+ @uploader.process_http_response(mock_result(500)) # Invalidates range
+ method, url, headers, body = @uploader.to_http_request # Resync
+ expect(headers['Content-Range']).to eq("bytes */#{@media.length}")
+ expect(headers['Content-length']).to eq("0")
+ @uploader.process_http_response(mock_result(308, 'range' => '0-99'))
+ method, url, headers, body = @uploader.to_http_request # Send next chunk at correct range
+ expect(headers['Content-Range']).to eq("bytes 100-299/#{@media.length}")
+ expect(headers['Content-length']).to eq("200")
+ end
+
+ def mock_result(status, headers = {})
+ reference = Google::APIClient::Reference.new(:api_method => @drive.files.insert)
+ double('result', :status => status, :headers => headers, :reference => reference)
+ end
+
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/request_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/request_spec.rb
new file mode 100644
index 0000000000..c63f750dc6
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/request_spec.rb
@@ -0,0 +1,29 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+
+RSpec.describe Google::APIClient::Request do
+ CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)
+
+ it 'should normalize parameter names to strings' do
+ request = Google::APIClient::Request.new(:uri => 'https://www.google.com', :parameters => {
+ :a => '1', 'b' => '2'
+ })
+ expect(request.parameters['a']).to eq('1')
+ expect(request.parameters['b']).to eq('2')
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/result_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/result_spec.rb
new file mode 100644
index 0000000000..67c63b77cf
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/result_spec.rb
@@ -0,0 +1,207 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+
+RSpec.describe Google::APIClient::Result do
+ CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)
+
+ describe 'with the plus API' do
+ before do
+ CLIENT.authorization = nil
+ @plus = CLIENT.discovered_api('plus', 'v1')
+ @reference = Google::APIClient::Reference.new({
+ :api_method => @plus.activities.list,
+ :parameters => {
+ 'userId' => 'me',
+ 'collection' => 'public',
+ 'maxResults' => 20
+ }
+ })
+ @request = @reference.to_http_request
+
+ # Response double
+ @response = double("response")
+ allow(@response).to receive(:status).and_return(200)
+ allow(@response).to receive(:headers).and_return({
+ 'etag' => '12345',
+ 'x-google-apiary-auth-scopes' =>
+ 'https://www.googleapis.com/auth/plus.me',
+ 'content-type' => 'application/json; charset=UTF-8',
+ 'date' => 'Mon, 23 Apr 2012 00:00:00 GMT',
+ 'cache-control' => 'private, max-age=0, must-revalidate, no-transform',
+ 'server' => 'GSE',
+ 'connection' => 'close'
+ })
+ end
+
+ describe 'with a next page token' do
+ before do
+ allow(@response).to receive(:body).and_return(
+ <<-END_OF_STRING
+ {
+ "kind": "plus#activityFeed",
+ "etag": "FOO",
+ "nextPageToken": "NEXT+PAGE+TOKEN",
+ "selfLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?",
+ "nextLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN",
+ "title": "Plus Public Activity Feed for ",
+ "updated": "2012-04-23T00:00:00.000Z",
+ "id": "123456790",
+ "items": []
+ }
+ END_OF_STRING
+ )
+ @result = Google::APIClient::Result.new(@reference, @response)
+ end
+
+ it 'should indicate a successful response' do
+ expect(@result.error?).to be_falsey
+ end
+
+ it 'should return the correct next page token' do
+ expect(@result.next_page_token).to eq('NEXT+PAGE+TOKEN')
+ end
+
+ it 'should escape the next page token when calling next_page' do
+ reference = @result.next_page
+ expect(Hash[reference.parameters]).to include('pageToken')
+ expect(Hash[reference.parameters]['pageToken']).to eq('NEXT+PAGE+TOKEN')
+ url = reference.to_env(CLIENT.connection)[:url]
+ expect(url.to_s).to include('pageToken=NEXT%2BPAGE%2BTOKEN')
+ end
+
+ it 'should return content type correctly' do
+ expect(@result.media_type).to eq('application/json')
+ end
+
+ it 'should return the result data correctly' do
+ expect(@result.data?).to be_truthy
+ expect(@result.data.class.to_s).to eq(
+ 'Google::APIClient::Schema::Plus::V1::ActivityFeed'
+ )
+ expect(@result.data.kind).to eq('plus#activityFeed')
+ expect(@result.data.etag).to eq('FOO')
+ expect(@result.data.nextPageToken).to eq('NEXT+PAGE+TOKEN')
+ expect(@result.data.selfLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?'
+ )
+ expect(@result.data.nextLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?' +
+ 'maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN'
+ )
+ expect(@result.data.title).to eq('Plus Public Activity Feed for ')
+ expect(@result.data.id).to eq("123456790")
+ expect(@result.data.items).to be_empty
+ end
+ end
+
+ describe 'without a next page token' do
+ before do
+ allow(@response).to receive(:body).and_return(
+ <<-END_OF_STRING
+ {
+ "kind": "plus#activityFeed",
+ "etag": "FOO",
+ "selfLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?",
+ "title": "Plus Public Activity Feed for ",
+ "updated": "2012-04-23T00:00:00.000Z",
+ "id": "123456790",
+ "items": []
+ }
+ END_OF_STRING
+ )
+ @result = Google::APIClient::Result.new(@reference, @response)
+ end
+
+ it 'should not return a next page token' do
+ expect(@result.next_page_token).to eq(nil)
+ end
+
+ it 'should return content type correctly' do
+ expect(@result.media_type).to eq('application/json')
+ end
+
+ it 'should return the result data correctly' do
+ expect(@result.data?).to be_truthy
+ expect(@result.data.class.to_s).to eq(
+ 'Google::APIClient::Schema::Plus::V1::ActivityFeed'
+ )
+ expect(@result.data.kind).to eq('plus#activityFeed')
+ expect(@result.data.etag).to eq('FOO')
+ expect(@result.data.selfLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?'
+ )
+ expect(@result.data.title).to eq('Plus Public Activity Feed for ')
+ expect(@result.data.id).to eq("123456790")
+ expect(@result.data.items).to be_empty
+ end
+ end
+
+ describe 'with JSON error response' do
+ before do
+ allow(@response).to receive(:body).and_return(
+ <<-END_OF_STRING
+ {
+ "error": {
+ "errors": [
+ {
+ "domain": "global",
+ "reason": "parseError",
+ "message": "Parse Error"
+ }
+ ],
+ "code": 400,
+ "message": "Parse Error"
+ }
+ }
+ END_OF_STRING
+ )
+ allow(@response).to receive(:status).and_return(400)
+ @result = Google::APIClient::Result.new(@reference, @response)
+ end
+
+ it 'should return error status correctly' do
+ expect(@result.error?).to be_truthy
+ end
+
+ it 'should return the correct error message' do
+ expect(@result.error_message).to eq('Parse Error')
+ end
+ end
+
+ describe 'with 204 No Content response' do
+ before do
+ allow(@response).to receive(:body).and_return('')
+ allow(@response).to receive(:status).and_return(204)
+ allow(@response).to receive(:headers).and_return({})
+ @result = Google::APIClient::Result.new(@reference, @response)
+ end
+
+ it 'should indicate no data is available' do
+ expect(@result.data?).to be_falsey
+ end
+
+ it 'should return nil for data' do
+ expect(@result.data).to eq(nil)
+ end
+
+ it 'should return nil for media_type' do
+ expect(@result.media_type).to eq(nil)
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/service_account_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/service_account_spec.rb
new file mode 100644
index 0000000000..6314cea6bc
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/service_account_spec.rb
@@ -0,0 +1,169 @@
+# Copyright 2012 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+
+fixtures_path = File.expand_path('../../../fixtures', __FILE__)
+
+RSpec.describe Google::APIClient::KeyUtils do
+ it 'should read PKCS12 files from the filesystem' do
+ if RUBY_PLATFORM == 'java' && RUBY_VERSION.start_with?('1.8')
+ pending "Reading from PKCS12 not supported on jruby 1.8.x"
+ end
+ path = File.expand_path('files/privatekey.p12', fixtures_path)
+ key = Google::APIClient::KeyUtils.load_from_pkcs12(path, 'notasecret')
+ expect(key).not_to eq(nil)
+ end
+
+ it 'should read PKCS12 files from loaded files' do
+ if RUBY_PLATFORM == 'java' && RUBY_VERSION.start_with?('1.8')
+ pending "Reading from PKCS12 not supported on jruby 1.8.x"
+ end
+ path = File.expand_path('files/privatekey.p12', fixtures_path)
+ content = File.read(path)
+ key = Google::APIClient::KeyUtils.load_from_pkcs12(content, 'notasecret')
+ expect(key).not_to eq(nil)
+ end
+
+ it 'should read PEM files from the filesystem' do
+ path = File.expand_path('files/secret.pem', fixtures_path)
+ key = Google::APIClient::KeyUtils.load_from_pem(path, 'notasecret')
+ expect(key).not_to eq(nil)
+ end
+
+ it 'should read PEM files from loaded files' do
+ path = File.expand_path('files/secret.pem', fixtures_path)
+ content = File.read(path)
+ key = Google::APIClient::KeyUtils.load_from_pem(content, 'notasecret')
+ expect(key).not_to eq(nil)
+ end
+
+end
+
+RSpec.describe Google::APIClient::JWTAsserter do
+ include ConnectionHelpers
+
+ before do
+ @key = OpenSSL::PKey::RSA.new 2048
+ end
+
+ it 'should generate valid JWTs' do
+ asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)
+ jwt = asserter.to_authorization.to_jwt
+ expect(jwt).not_to eq(nil)
+
+ claim = JWT.decode(jwt, @key.public_key, true)
+ claim = claim[0] if claim[0]
+ expect(claim["iss"]).to eq('client1')
+ expect(claim["scope"]).to eq('scope1 scope2')
+ end
+
+ it 'should allow impersonation' do
+ conn = stub_connection do |stub|
+ stub.post('/o/oauth2/token') do |env|
+ params = Addressable::URI.form_unencode(env[:body])
+ JWT.decode(params.assoc("assertion").last, @key.public_key)
+ expect(params.assoc("grant_type")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])
+ [200, {'content-type' => 'application/json'}, '{
+ "access_token" : "1/abcdef1234567890",
+ "token_type" : "Bearer",
+ "expires_in" : 3600
+ }']
+ end
+ end
+ asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)
+ auth = asserter.authorize('user1@email.com', { :connection => conn })
+    expect(auth).not_to eq(nil)
+ expect(auth.person).to eq('user1@email.com')
+ conn.verify
+ end
+
+ it 'should send valid access token request' do
+ conn = stub_connection do |stub|
+ stub.post('/o/oauth2/token') do |env|
+ params = Addressable::URI.form_unencode(env[:body])
+ JWT.decode(params.assoc("assertion").last, @key.public_key)
+ expect(params.assoc("grant_type")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])
+ [200, {'content-type' => 'application/json'}, '{
+ "access_token" : "1/abcdef1234567890",
+ "token_type" : "Bearer",
+ "expires_in" : 3600
+ }']
+ end
+ end
+ asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)
+ auth = asserter.authorize(nil, { :connection => conn })
+    expect(auth).not_to eq(nil)
+ expect(auth.access_token).to eq("1/abcdef1234567890")
+ conn.verify
+ end
+
+ it 'should be refreshable' do
+ conn = stub_connection do |stub|
+ stub.post('/o/oauth2/token') do |env|
+ params = Addressable::URI.form_unencode(env[:body])
+ JWT.decode(params.assoc("assertion").last, @key.public_key)
+ expect(params.assoc("grant_type")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])
+ [200, {'content-type' => 'application/json'}, '{
+ "access_token" : "1/abcdef1234567890",
+ "token_type" : "Bearer",
+ "expires_in" : 3600
+ }']
+ end
+ stub.post('/o/oauth2/token') do |env|
+ params = Addressable::URI.form_unencode(env[:body])
+ JWT.decode(params.assoc("assertion").last, @key.public_key)
+ expect(params.assoc("grant_type")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])
+ [200, {'content-type' => 'application/json'}, '{
+ "access_token" : "1/0987654321fedcba",
+ "token_type" : "Bearer",
+ "expires_in" : 3600
+ }']
+ end
+ end
+ asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)
+ auth = asserter.authorize(nil, { :connection => conn })
+    expect(auth).not_to eq(nil)
+ expect(auth.access_token).to eq("1/abcdef1234567890")
+
+ auth.fetch_access_token!(:connection => conn)
+ expect(auth.access_token).to eq("1/0987654321fedcba")
+
+ conn.verify
+ end
+end
+
+RSpec.describe Google::APIClient::ComputeServiceAccount do
+ include ConnectionHelpers
+
+ it 'should query metadata server' do
+ conn = stub_connection do |stub|
+ stub.get('/computeMetadata/v1beta1/instance/service-accounts/default/token') do |env|
+ expect(env.url.host).to eq('metadata')
+ [200, {'content-type' => 'application/json'}, '{
+ "access_token" : "1/abcdef1234567890",
+ "token_type" : "Bearer",
+ "expires_in" : 3600
+ }']
+ end
+ end
+ service_account = Google::APIClient::ComputeServiceAccount.new
+ auth = service_account.fetch_access_token!({ :connection => conn })
+    expect(auth).not_to eq(nil)
+ expect(auth["access_token"]).to eq("1/abcdef1234567890")
+ conn.verify
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/service_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/service_spec.rb
new file mode 100644
index 0000000000..fbbdd53ee9
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/service_spec.rb
@@ -0,0 +1,618 @@
+# encoding:utf-8
+
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client'
+require 'google/api_client/service'
+
+fixtures_path = File.expand_path('../../../fixtures', __FILE__)
+
+RSpec.describe Google::APIClient::Service do
+ include ConnectionHelpers
+
+ APPLICATION_NAME = 'API Client Tests'
+
+ it 'should error out when called without an API name or version' do
+ expect(lambda do
+ Google::APIClient::Service.new
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should error out when called without an API version' do
+ expect(lambda do
+ Google::APIClient::Service.new('foo')
+ end).to raise_error(ArgumentError)
+ end
+
+ it 'should error out when the options hash is not a hash' do
+ expect(lambda do
+ Google::APIClient::Service.new('foo', 'v1', 42)
+ end).to raise_error(ArgumentError)
+ end
+
+ describe 'with the AdSense Management API' do
+
+ it 'should make a valid call for a method with no parameters' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/adclients') do |env|
+ [200, {}, '{}']
+ end
+ end
+ adsense = Google::APIClient::Service.new(
+ 'adsense',
+ 'v1.3',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+
+ req = adsense.adclients.list.execute()
+ conn.verify
+ end
+
+ it 'should make a valid call for a method with parameters' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/adclients/1/adunits') do |env|
+ [200, {}, '{}']
+ end
+ end
+ adsense = Google::APIClient::Service.new(
+ 'adsense',
+ 'v1.3',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+ req = adsense.adunits.list(:adClientId => '1').execute()
+ end
+
+ it 'should make a valid call for a deep method' do
+ conn = stub_connection do |stub|
+ stub.get('/adsense/v1.3/accounts/1/adclients') do |env|
+ [200, {}, '{}']
+ end
+ end
+ adsense = Google::APIClient::Service.new(
+ 'adsense',
+ 'v1.3',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+ req = adsense.accounts.adclients.list(:accountId => '1').execute()
+ end
+
+ describe 'with no connection' do
+ before do
+ @adsense = Google::APIClient::Service.new('adsense', 'v1.3',
+ {:application_name => APPLICATION_NAME, :cache_store => nil})
+ end
+
+ it 'should return a resource when using a valid resource name' do
+ expect(@adsense.accounts).to be_a(Google::APIClient::Service::Resource)
+ end
+
+ it 'should throw an error when using an invalid resource name' do
+ expect(lambda do
+ @adsense.invalid_resource
+ end).to raise_error
+ end
+
+ it 'should return a request when using a valid method name' do
+ req = @adsense.adclients.list
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('adsense.adclients.list')
+ expect(req.parameters).to be_nil
+ end
+
+ it 'should throw an error when using an invalid method name' do
+ expect(lambda do
+ @adsense.adclients.invalid_method
+ end).to raise_error
+ end
+
+ it 'should return a valid request with parameters' do
+ req = @adsense.adunits.list(:adClientId => '1')
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('adsense.adunits.list')
+ expect(req.parameters).not_to be_nil
+ expect(req.parameters[:adClientId]).to eq('1')
+ end
+ end
+ end
+
+ describe 'with the Prediction API' do
+
+ it 'should make a valid call with an object body' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.5/trainedmodels?project=1') do |env|
+ expect(env.body).to eq('{"id":"1"}')
+ [200, {}, '{}']
+ end
+ end
+ prediction = Google::APIClient::Service.new(
+ 'prediction',
+ 'v1.5',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+ req = prediction.trainedmodels.insert(:project => '1').body({'id' => '1'}).execute()
+ conn.verify
+ end
+
+ it 'should make a valid call with a text body' do
+ conn = stub_connection do |stub|
+ stub.post('/prediction/v1.5/trainedmodels?project=1') do |env|
+ expect(env.body).to eq('{"id":"1"}')
+ [200, {}, '{}']
+ end
+ end
+ prediction = Google::APIClient::Service.new(
+ 'prediction',
+ 'v1.5',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+ req = prediction.trainedmodels.insert(:project => '1').body('{"id":"1"}').execute()
+ conn.verify
+ end
+
+ describe 'with no connection' do
+ before do
+ @prediction = Google::APIClient::Service.new('prediction', 'v1.5',
+ {:application_name => APPLICATION_NAME, :cache_store => nil})
+ end
+
+ it 'should return a valid request with a body' do
+ req = @prediction.trainedmodels.insert(:project => '1').body({'id' => '1'})
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('prediction.trainedmodels.insert')
+ expect(req.body).to eq({'id' => '1'})
+ expect(req.parameters).not_to be_nil
+ expect(req.parameters[:project]).to eq('1')
+ end
+
+ it 'should return a valid request with a body when using resource name' do
+ req = @prediction.trainedmodels.insert(:project => '1').training({'id' => '1'})
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('prediction.trainedmodels.insert')
+ expect(req.training).to eq({'id' => '1'})
+ expect(req.parameters).not_to be_nil
+ expect(req.parameters[:project]).to eq('1')
+ end
+ end
+ end
+
+ describe 'with the Drive API' do
+
+ before do
+ @metadata = {
+ 'title' => 'My movie',
+ 'description' => 'The best home movie ever made'
+ }
+ @file = File.expand_path('files/sample.txt', fixtures_path)
+ @media = Google::APIClient::UploadIO.new(@file, 'text/plain')
+ end
+
+ it 'should make a valid call with an object body and media upload' do
+ conn = stub_connection do |stub|
+ stub.post('/upload/drive/v2/files?uploadType=multipart') do |env|
+ expect(env.body).to be_a Faraday::CompositeReadIO
+ [200, {}, '{}']
+ end
+ end
+ drive = Google::APIClient::Service.new(
+ 'drive',
+ 'v2',
+ {
+ :application_name => APPLICATION_NAME,
+ :authenticated => false,
+ :connection => conn,
+ :cache_store => nil
+ }
+ )
+ req = drive.files.insert(:uploadType => 'multipart').body(@metadata).media(@media).execute()
+ conn.verify
+ end
+
+ describe 'with no connection' do
+ before do
+ @drive = Google::APIClient::Service.new('drive', 'v2',
+ {:application_name => APPLICATION_NAME, :cache_store => nil})
+ end
+
+ it 'should return a valid request with a body and media upload' do
+ req = @drive.files.insert(:uploadType => 'multipart').body(@metadata).media(@media)
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('drive.files.insert')
+ expect(req.body).to eq(@metadata)
+ expect(req.media).to eq(@media)
+ expect(req.parameters).not_to be_nil
+ expect(req.parameters[:uploadType]).to eq('multipart')
+ end
+
+ it 'should return a valid request with a body and media upload when using resource name' do
+ req = @drive.files.insert(:uploadType => 'multipart').file(@metadata).media(@media)
+ expect(req).to be_a(Google::APIClient::Service::Request)
+ expect(req.method.id).to eq('drive.files.insert')
+ expect(req.file).to eq(@metadata)
+ expect(req.media).to eq(@media)
+ expect(req.parameters).not_to be_nil
+ expect(req.parameters[:uploadType]).to eq('multipart')
+ end
+ end
+ end
+
+ describe 'with the Discovery API' do
+ it 'should make a valid end-to-end request' do
+ discovery = Google::APIClient::Service.new('discovery', 'v1',
+ {:application_name => APPLICATION_NAME, :authenticated => false,
+ :cache_store => nil})
+ result = discovery.apis.get_rest(:api => 'discovery', :version => 'v1').execute
+ expect(result).not_to be_nil
+ expect(result.data.name).to eq('discovery')
+ expect(result.data.version).to eq('v1')
+ end
+ end
+end
+
+
+RSpec.describe Google::APIClient::Service::Result do
+
+ describe 'with the plus API' do
+ before do
+ @plus = Google::APIClient::Service.new('plus', 'v1',
+ {:application_name => APPLICATION_NAME, :cache_store => nil})
+ @reference = Google::APIClient::Reference.new({
+ :api_method => @plus.activities.list.method,
+ :parameters => {
+ 'userId' => 'me',
+ 'collection' => 'public',
+ 'maxResults' => 20
+ }
+ })
+ @request = @plus.activities.list(:userId => 'me', :collection => 'public',
+ :maxResults => 20)
+
+ # Response double
+ @response = double("response")
+ allow(@response).to receive(:status).and_return(200)
+ allow(@response).to receive(:headers).and_return({
+ 'etag' => '12345',
+ 'x-google-apiary-auth-scopes' =>
+ 'https://www.googleapis.com/auth/plus.me',
+ 'content-type' => 'application/json; charset=UTF-8',
+ 'date' => 'Mon, 23 Apr 2012 00:00:00 GMT',
+ 'cache-control' => 'private, max-age=0, must-revalidate, no-transform',
+ 'server' => 'GSE',
+ 'connection' => 'close'
+ })
+ end
+
+ describe 'with a next page token' do
+ before do
+ @body = <<-END_OF_STRING
+ {
+ "kind": "plus#activityFeed",
+ "etag": "FOO",
+ "nextPageToken": "NEXT+PAGE+TOKEN",
+ "selfLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?",
+ "nextLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN",
+ "title": "Plus Public Activity Feed for ",
+ "updated": "2012-04-23T00:00:00.000Z",
+ "id": "123456790",
+ "items": []
+ }
+ END_OF_STRING
+ allow(@response).to receive(:body).and_return(@body)
+ base_result = Google::APIClient::Result.new(@reference, @response)
+ @result = Google::APIClient::Service::Result.new(@request, base_result)
+ end
+
+ it 'should indicate a successful response' do
+ expect(@result.error?).to be_falsey
+ end
+
+ it 'should return the correct next page token' do
+ expect(@result.next_page_token).to eq('NEXT+PAGE+TOKEN')
+ end
+
+ it 'generate a correct request when calling next_page' do
+ next_page_request = @result.next_page
+ expect(next_page_request.parameters).to include('pageToken')
+ expect(next_page_request.parameters['pageToken']).to eq('NEXT+PAGE+TOKEN')
+ @request.parameters.each_pair do |param, value|
+ expect(next_page_request.parameters[param]).to eq(value)
+ end
+ end
+
+ it 'should return content type correctly' do
+ expect(@result.media_type).to eq('application/json')
+ end
+
+ it 'should return the body correctly' do
+ expect(@result.body).to eq(@body)
+ end
+
+ it 'should return the result data correctly' do
+ expect(@result.data?).to be_truthy
+ expect(@result.data.class.to_s).to eq(
+ 'Google::APIClient::Schema::Plus::V1::ActivityFeed'
+ )
+ expect(@result.data.kind).to eq('plus#activityFeed')
+ expect(@result.data.etag).to eq('FOO')
+ expect(@result.data.nextPageToken).to eq('NEXT+PAGE+TOKEN')
+ expect(@result.data.selfLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?'
+ )
+ expect(@result.data.nextLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?' +
+ 'maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN'
+ )
+ expect(@result.data.title).to eq('Plus Public Activity Feed for ')
+ expect(@result.data.id).to eq("123456790")
+ expect(@result.data.items).to be_empty
+ end
+ end
+
+ describe 'without a next page token' do
+ before do
+ @body = <<-END_OF_STRING
+ {
+ "kind": "plus#activityFeed",
+ "etag": "FOO",
+ "selfLink": "https://www.googleapis.com/plus/v1/people/foo/activities/public?",
+ "title": "Plus Public Activity Feed for ",
+ "updated": "2012-04-23T00:00:00.000Z",
+ "id": "123456790",
+ "items": []
+ }
+ END_OF_STRING
+ allow(@response).to receive(:body).and_return(@body)
+ base_result = Google::APIClient::Result.new(@reference, @response)
+ @result = Google::APIClient::Service::Result.new(@request, base_result)
+ end
+
+ it 'should not return a next page token' do
+ expect(@result.next_page_token).to eq(nil)
+ end
+
+ it 'should return content type correctly' do
+ expect(@result.media_type).to eq('application/json')
+ end
+
+ it 'should return the body correctly' do
+ expect(@result.body).to eq(@body)
+ end
+
+ it 'should return the result data correctly' do
+ expect(@result.data?).to be_truthy
+ expect(@result.data.class.to_s).to eq(
+ 'Google::APIClient::Schema::Plus::V1::ActivityFeed'
+ )
+ expect(@result.data.kind).to eq('plus#activityFeed')
+ expect(@result.data.etag).to eq('FOO')
+ expect(@result.data.selfLink).to eq(
+ 'https://www.googleapis.com/plus/v1/people/foo/activities/public?'
+ )
+ expect(@result.data.title).to eq('Plus Public Activity Feed for ')
+ expect(@result.data.id).to eq("123456790")
+ expect(@result.data.items).to be_empty
+ end
+ end
+
+ describe 'with JSON error response' do
+ before do
+ @body = <<-END_OF_STRING
+ {
+ "error": {
+ "errors": [
+ {
+ "domain": "global",
+ "reason": "parseError",
+ "message": "Parse Error"
+ }
+ ],
+ "code": 400,
+ "message": "Parse Error"
+ }
+ }
+ END_OF_STRING
+ allow(@response).to receive(:body).and_return(@body)
+ allow(@response).to receive(:status).and_return(400)
+ base_result = Google::APIClient::Result.new(@reference, @response)
+ @result = Google::APIClient::Service::Result.new(@request, base_result)
+ end
+
+ it 'should return error status correctly' do
+ expect(@result.error?).to be_truthy
+ end
+
+ it 'should return the correct error message' do
+ expect(@result.error_message).to eq('Parse Error')
+ end
+
+ it 'should return the body correctly' do
+ expect(@result.body).to eq(@body)
+ end
+ end
+
+ describe 'with 204 No Content response' do
+ before do
+ allow(@response).to receive(:body).and_return('')
+ allow(@response).to receive(:status).and_return(204)
+ allow(@response).to receive(:headers).and_return({})
+ base_result = Google::APIClient::Result.new(@reference, @response)
+ @result = Google::APIClient::Service::Result.new(@request, base_result)
+ end
+
+ it 'should indicate no data is available' do
+ expect(@result.data?).to be_falsey
+ end
+
+ it 'should return nil for data' do
+ expect(@result.data).to eq(nil)
+ end
+
+ it 'should return nil for media_type' do
+ expect(@result.media_type).to eq(nil)
+ end
+ end
+ end
+end
+
+RSpec.describe Google::APIClient::Service::BatchRequest do
+
+ include ConnectionHelpers
+
+ context 'with a service connection' do
+ before do
+ @conn = stub_connection do |stub|
+ stub.post('/batch') do |env|
+ [500, {'Content-Type' => 'application/json'}, '{}']
+ end
+ end
+ @discovery = Google::APIClient::Service.new('discovery', 'v1',
+ {:application_name => APPLICATION_NAME, :authorization => nil,
+ :cache_store => nil, :connection => @conn})
+ @calls = [
+ @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),
+ @discovery.apis.get_rest(:api => 'discovery', :version => 'v1')
+ ]
+ end
+
+ it 'should use the service connection' do
+ batch = @discovery.batch(@calls) do
+ end
+ batch.execute
+ @conn.verify
+ end
+ end
+
+ describe 'with the discovery API' do
+ before do
+ @discovery = Google::APIClient::Service.new('discovery', 'v1',
+ {:application_name => APPLICATION_NAME, :authorization => nil,
+ :cache_store => nil})
+ end
+
+ describe 'with two valid requests' do
+ before do
+ @calls = [
+ @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),
+ @discovery.apis.get_rest(:api => 'discovery', :version => 'v1')
+ ]
+ end
+
+ it 'should execute both when using a global callback' do
+ block_called = 0
+ batch = @discovery.batch(@calls) do |result|
+ block_called += 1
+ expect(result.status).to eq(200)
+ end
+
+ batch.execute
+ expect(block_called).to eq(2)
+ end
+
+ it 'should execute both when using individual callbacks' do
+ call1_returned, call2_returned = false, false
+ batch = @discovery.batch
+
+ batch.add(@calls[0]) do |result|
+ call1_returned = true
+ expect(result.status).to eq(200)
+ expect(result.call_index).to eq(0)
+ end
+
+ batch.add(@calls[1]) do |result|
+ call2_returned = true
+ expect(result.status).to eq(200)
+ expect(result.call_index).to eq(1)
+ end
+
+ batch.execute
+ expect(call1_returned).to eq(true)
+ expect(call2_returned).to eq(true)
+ end
+ end
+
+ describe 'with a valid request and an invalid one' do
+ before do
+ @calls = [
+ @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),
+ @discovery.apis.get_rest(:api => 'invalid', :version => 'invalid')
+ ]
+ end
+
+ it 'should execute both when using a global callback' do
+ block_called = 0
+ batch = @discovery.batch(@calls) do |result|
+ block_called += 1
+ if result.call_index == 0
+ expect(result.status).to eq(200)
+ else
+ expect(result.status).to be >= 400
+ expect(result.status).to be < 500
+ end
+ end
+
+ batch.execute
+ expect(block_called).to eq(2)
+ end
+
+ it 'should execute both when using individual callbacks' do
+ call1_returned, call2_returned = false, false
+ batch = @discovery.batch
+
+ batch.add(@calls[0]) do |result|
+ call1_returned = true
+ expect(result.status).to eq(200)
+ expect(result.call_index).to eq(0)
+ end
+
+ batch.add(@calls[1]) do |result|
+ call2_returned = true
+ expect(result.status).to be >= 400
+ expect(result.status).to be < 500
+ expect(result.call_index).to eq(1)
+ end
+
+ batch.execute
+ expect(call1_returned).to eq(true)
+ expect(call2_returned).to eq(true)
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/google/api_client/simple_file_store_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client/simple_file_store_spec.rb
new file mode 100644
index 0000000000..cb7d898475
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client/simple_file_store_spec.rb
@@ -0,0 +1,133 @@
+# encoding:utf-8
+
+# Copyright 2013 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'google/api_client/service/simple_file_store'
+
+RSpec.describe Google::APIClient::Service::SimpleFileStore do
+
+ FILE_NAME = 'test.cache'
+
+ describe 'with no cache file' do
+ before(:each) do
+ File.delete(FILE_NAME) if File.exists?(FILE_NAME)
+ @cache = Google::APIClient::Service::SimpleFileStore.new(FILE_NAME)
+ end
+
+ it 'should return nil when asked if a key exists' do
+ expect(@cache.exist?('invalid')).to be_nil
+ expect(File.exists?(FILE_NAME)).to be_falsey
+ end
+
+ it 'should return nil when asked to read a key' do
+ expect(@cache.read('invalid')).to be_nil
+ expect(File.exists?(FILE_NAME)).to be_falsey
+ end
+
+ it 'should return nil when asked to fetch a key' do
+ expect(@cache.fetch('invalid')).to be_nil
+ expect(File.exists?(FILE_NAME)).to be_falsey
+ end
+
+ it 'should create a cache file when asked to fetch a key with a default' do
+ expect(@cache.fetch('new_key') do
+ 'value'
+ end).to eq('value')
+ expect(File.exists?(FILE_NAME)).to be_truthy
+ end
+
+ it 'should create a cache file when asked to write a key' do
+ @cache.write('new_key', 'value')
+ expect(File.exists?(FILE_NAME)).to be_truthy
+ end
+
+ it 'should return nil when asked to delete a key' do
+ expect(@cache.delete('invalid')).to be_nil
+ expect(File.exists?(FILE_NAME)).to be_falsey
+ end
+ end
+
+ describe 'with an existing cache' do
+ before(:each) do
+ File.delete(FILE_NAME) if File.exists?(FILE_NAME)
+ @cache = Google::APIClient::Service::SimpleFileStore.new(FILE_NAME)
+ @cache.write('existing_key', 'existing_value')
+ end
+
+ it 'should return true when asked if an existing key exists' do
+ expect(@cache.exist?('existing_key')).to be_truthy
+ end
+
+ it 'should return false when asked if a nonexistent key exists' do
+ expect(@cache.exist?('invalid')).to be_falsey
+ end
+
+ it 'should return the value for an existing key when asked to read it' do
+ expect(@cache.read('existing_key')).to eq('existing_value')
+ end
+
+ it 'should return nil for a nonexistent key when asked to read it' do
+ expect(@cache.read('invalid')).to be_nil
+ end
+
+ it 'should return the value for an existing key when asked to read it' do
+ expect(@cache.read('existing_key')).to eq('existing_value')
+ end
+
+ it 'should return nil for a nonexistent key when asked to fetch it' do
+ expect(@cache.fetch('invalid')).to be_nil
+ end
+
+ it 'should return and save the default value for a nonexistent key when asked to fetch it with a default' do
+ expect(@cache.fetch('new_key') do
+ 'value'
+ end).to eq('value')
+ expect(@cache.read('new_key')).to eq('value')
+ end
+
+ it 'should remove an existing value and return true when asked to delete it' do
+ expect(@cache.delete('existing_key')).to be_truthy
+ expect(@cache.read('existing_key')).to be_nil
+ end
+
+ it 'should return false when asked to delete a nonexistent key' do
+ expect(@cache.delete('invalid')).to be_falsey
+ end
+
+ it 'should convert keys to strings when storing them' do
+ @cache.write(:symbol_key, 'value')
+ expect(@cache.read('symbol_key')).to eq('value')
+ end
+
+ it 'should convert keys to strings when reading them' do
+ expect(@cache.read(:existing_key)).to eq('existing_value')
+ end
+
+ it 'should convert keys to strings when fetching them' do
+ expect(@cache.fetch(:existing_key)).to eq('existing_value')
+ end
+
+ it 'should convert keys to strings when deleting them' do
+ expect(@cache.delete(:existing_key)).to be_truthy
+ expect(@cache.read('existing_key')).to be_nil
+ end
+ end
+
+ after(:all) do
+ File.delete(FILE_NAME) if File.exists?(FILE_NAME)
+ end
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/spec/google/api_client_spec.rb b/sdk/ruby-google-api-client/spec/google/api_client_spec.rb
new file mode 100644
index 0000000000..eb9a59af7b
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/google/api_client_spec.rb
@@ -0,0 +1,352 @@
+# Copyright 2010 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'spec_helper'
+
+require 'faraday'
+require 'signet/oauth_1/client'
+require 'google/api_client'
+
+shared_examples_for 'configurable user agent' do
+ include ConnectionHelpers
+
+ it 'should allow the user agent to be modified' do
+ client.user_agent = 'Custom User Agent/1.2.3'
+ expect(client.user_agent).to eq('Custom User Agent/1.2.3')
+ end
+
+ it 'should allow the user agent to be set to nil' do
+ client.user_agent = nil
+ expect(client.user_agent).to eq(nil)
+ end
+
+ it 'should not allow the user agent to be used with bogus values' do
+ expect(lambda do
+ client.user_agent = 42
+ client.execute(:uri=>'https://www.google.com/')
+ end).to raise_error(TypeError)
+ end
+
+ it 'should transmit a User-Agent header when sending requests' do
+ client.user_agent = 'Custom User Agent/1.2.3'
+
+ conn = stub_connection do |stub|
+ stub.get('/') do |env|
+ headers = env[:request_headers]
+ expect(headers).to have_key('User-Agent')
+ expect(headers['User-Agent']).to eq(client.user_agent)
+ [200, {}, ['']]
+ end
+ end
+ client.execute(:uri=>'https://www.google.com/', :connection => conn)
+ conn.verify
+ end
+end
+
+RSpec.describe Google::APIClient do
+ include ConnectionHelpers
+
+ let(:client) { Google::APIClient.new(:application_name => 'API Client Tests') }
+
+ it "should pass the faraday options provided on initialization to FaraDay configuration block" do
+ client = Google::APIClient.new(faraday_option: {timeout: 999})
+ expect(client.connection.options.timeout).to be == 999
+ end
+
+ it 'should make its version number available' do
+ expect(Google::APIClient::VERSION::STRING).to be_instance_of(String)
+ end
+
+ it 'should default to OAuth 2' do
+ expect(Signet::OAuth2::Client).to be === client.authorization
+ end
+
+ describe 'configure for no authentication' do
+ before do
+ client.authorization = nil
+ end
+ it_should_behave_like 'configurable user agent'
+ end
+
+ describe 'configured for OAuth 1' do
+ before do
+ client.authorization = :oauth_1
+ client.authorization.token_credential_key = 'abc'
+ client.authorization.token_credential_secret = '123'
+ end
+
+ it 'should use the default OAuth1 client configuration' do
+ expect(client.authorization.temporary_credential_uri.to_s).to eq(
+ 'https://www.google.com/accounts/OAuthGetRequestToken'
+ )
+ expect(client.authorization.authorization_uri.to_s).to include(
+ 'https://www.google.com/accounts/OAuthAuthorizeToken'
+ )
+ expect(client.authorization.token_credential_uri.to_s).to eq(
+ 'https://www.google.com/accounts/OAuthGetAccessToken'
+ )
+ expect(client.authorization.client_credential_key).to eq('anonymous')
+ expect(client.authorization.client_credential_secret).to eq('anonymous')
+ end
+
+ it_should_behave_like 'configurable user agent'
+ end
+
+ describe 'configured for OAuth 2' do
+ before do
+ client.authorization = :oauth_2
+ client.authorization.access_token = '12345'
+ end
+
+ # TODO
+ it_should_behave_like 'configurable user agent'
+ end
+
+ describe 'when executing requests' do
+ before do
+ @prediction = client.discovered_api('prediction', 'v1.2')
+ client.authorization = :oauth_2
+ @connection = stub_connection do |stub|
+ stub.post('/prediction/v1.2/training?data=12345') do |env|
+ expect(env[:request_headers]['Authorization']).to eq('Bearer 12345')
+ [200, {}, '{}']
+ end
+ end
+ end
+
+ after do
+ @connection.verify
+ end
+
+ it 'should use default authorization' do
+ client.authorization.access_token = "12345"
+ client.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :connection => @connection
+ )
+ end
+
+ it 'should use request scoped authorization when provided' do
+ client.authorization.access_token = "abcdef"
+ new_auth = Signet::OAuth2::Client.new(:access_token => '12345')
+ client.execute(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'},
+ :authorization => new_auth,
+ :connection => @connection
+ )
+ end
+
+ it 'should accept options with batch/request style execute' do
+ client.authorization.access_token = "abcdef"
+ new_auth = Signet::OAuth2::Client.new(:access_token => '12345')
+ request = client.generate_request(
+ :api_method => @prediction.training.insert,
+ :parameters => {'data' => '12345'}
+ )
+ client.execute(
+ request,
+ :authorization => new_auth,
+ :connection => @connection
+ )
+ end
+
+
+ it 'should accept options in array style execute' do
+ client.authorization.access_token = "abcdef"
+ new_auth = Signet::OAuth2::Client.new(:access_token => '12345')
+ client.execute(
+ @prediction.training.insert, {'data' => '12345'}, '', {},
+ { :authorization => new_auth, :connection => @connection }
+ )
+ end
+ end
+
+ describe 'when retries enabled' do
+ before do
+ client.retries = 2
+ end
+
+ after do
+ @connection.verify
+ end
+
+ it 'should follow redirects' do
+ client.authorization = nil
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [302, {'location' => 'https://www.google.com/bar'}, '{}']
+ end
+ stub.get('/bar') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ )
+ end
+
+ it 'should refresh tokens on 401 errors' do
+ client.authorization.access_token = '12345'
+ expect(client.authorization).to receive(:fetch_access_token!)
+
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [401, {}, '{}']
+ end
+ stub.get('/foo') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ )
+ end
+
+
+ it 'should not attempt multiple token refreshes' do
+ client.authorization.access_token = '12345'
+ expect(client.authorization).to receive(:fetch_access_token!).once
+
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [401, {}, '{}']
+ end
+ end
+
+ client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ )
+ end
+
+ it 'should not retry on client errors' do
+ count = 0
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ expect(count).to eq(0)
+ count += 1
+ [403, {}, '{}']
+ end
+ end
+
+ client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection,
+ :authenticated => false
+ )
+ end
+
+ it 'should retry on 500 errors' do
+ client.authorization = nil
+
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [500, {}, '{}']
+ end
+ stub.get('/foo') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ expect(client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ ).status).to eq(200)
+
+ end
+
+ it 'should fail after max retries' do
+ client.authorization = nil
+ count = 0
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ count += 1
+ [500, {}, '{}']
+ end
+ end
+
+ expect(client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ ).status).to eq(500)
+ expect(count).to eq(3)
+ end
+
+ end
+
+ describe 'when retries disabled and expired_auth_retry on (default)' do
+ before do
+ client.retries = 0
+ end
+
+ after do
+ @connection.verify
+ end
+
+ it 'should refresh tokens on 401 errors' do
+ client.authorization.access_token = '12345'
+ expect(client.authorization).to receive(:fetch_access_token!)
+
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [401, {}, '{}']
+ end
+ stub.get('/foo') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ )
+ end
+
+ end
+
+ describe 'when retries disabled and expired_auth_retry off' do
+ before do
+ client.retries = 0
+ client.expired_auth_retry = false
+ end
+
+ it 'should not refresh tokens on 401 errors' do
+ client.authorization.access_token = '12345'
+ expect(client.authorization).not_to receive(:fetch_access_token!)
+
+ @connection = stub_connection do |stub|
+ stub.get('/foo') do |env|
+ [401, {}, '{}']
+ end
+ stub.get('/foo') do |env|
+ [200, {}, '{}']
+ end
+ end
+
+ resp = client.execute(
+ :uri => 'https://www.google.com/foo',
+ :connection => @connection
+ )
+
+ expect(resp.response.status).to be == 401
+ end
+
+ end
+end
diff --git a/sdk/ruby-google-api-client/spec/spec_helper.rb b/sdk/ruby-google-api-client/spec/spec_helper.rb
new file mode 100644
index 0000000000..1c64a4e8cb
--- /dev/null
+++ b/sdk/ruby-google-api-client/spec/spec_helper.rb
@@ -0,0 +1,66 @@
+$LOAD_PATH.unshift(File.expand_path('../../lib', __FILE__))
+$LOAD_PATH.uniq!
+
+require 'rspec'
+require 'faraday'
+
+begin
+ require 'simplecov'
+ require 'coveralls'
+
+ SimpleCov.formatter = Coveralls::SimpleCov::Formatter
+ SimpleCov.start
+rescue LoadError
+ # SimpleCov missing, so just run specs with no coverage.
+end
+
+Faraday::Adapter.load_middleware(:test)
+
+module Faraday
+ class Connection
+ def verify
+ if app.kind_of?(Faraday::Adapter::Test)
+ app.stubs.verify_stubbed_calls
+ else
+ raise TypeError, "Expected test adapter"
+ end
+ end
+ end
+end
+
+module ConnectionHelpers
+ def stub_connection(&block)
+ stubs = Faraday::Adapter::Test::Stubs.new do |stub|
+ block.call(stub)
+ end
+ connection = Faraday.new do |builder|
+ builder.options.params_encoder = Faraday::FlatParamsEncoder
+ builder.adapter(:test, stubs)
+ end
+ end
+end
+
+module JSONMatchers
+ class EqualsJson
+ def initialize(expected)
+ @expected = JSON.parse(expected)
+ end
+ def matches?(target)
+ @target = JSON.parse(target)
+ @target.eql?(@expected)
+ end
+ def failure_message
+ "expected #{@target.inspect} to be #{@expected}"
+ end
+ def negative_failure_message
+ "expected #{@target.inspect} not to be #{@expected}"
+ end
+ end
+
+ def be_json(expected)
+ EqualsJson.new(expected)
+ end
+end
+
+RSpec.configure do |config|
+end
diff --git a/sdk/ruby-google-api-client/yard/bin/yard-wiki b/sdk/ruby-google-api-client/yard/bin/yard-wiki
new file mode 100755
index 0000000000..61416750ec
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/bin/yard-wiki
@@ -0,0 +1,9 @@
+#!/usr/bin/env ruby
+$LOAD_PATH.unshift(
+ File.expand_path(File.join(File.dirname(__FILE__), '../lib'))
+)
+$LOAD_PATH.uniq!
+
+require 'yard/cli/wiki'
+
+YARD::CLI::Wiki.run(*ARGV)
diff --git a/sdk/ruby-google-api-client/yard/lib/yard-google-code.rb b/sdk/ruby-google-api-client/yard/lib/yard-google-code.rb
new file mode 100644
index 0000000000..cd4eba8347
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/lib/yard-google-code.rb
@@ -0,0 +1,12 @@
+$LOAD_PATH.unshift(File.expand_path(File.dirname(__FILE__)))
+$LOAD_PATH.uniq!
+
+YARD::Templates::Engine.register_template_path File.dirname(__FILE__) + '/../templates'
+require 'yard/templates/template'
+require 'yard/templates/helpers/wiki_helper'
+
+::YARD::Templates::Template.extra_includes |= [
+ YARD::Templates::Helpers::WikiHelper
+]
+
+require 'yard/serializers/wiki_serializer'
diff --git a/sdk/ruby-google-api-client/yard/lib/yard/cli/wiki.rb b/sdk/ruby-google-api-client/yard/lib/yard/cli/wiki.rb
new file mode 100644
index 0000000000..2c17393190
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/lib/yard/cli/wiki.rb
@@ -0,0 +1,44 @@
+require 'yard'
+require 'yard/serializers/wiki_serializer'
+require 'yard/cli/yardoc'
+
+module YARD
+ module CLI
+ class Wiki < Yardoc
+ # Creates a new instance of the commandline utility
+ def initialize
+ super
+ @options = SymbolHash.new(false)
+ @options.update(
+ :format => :html,
+ :template => :default,
+ :markup => :rdoc, # default is :rdoc but falls back on :none
+ :serializer => YARD::Serializers::WikiSerializer.new, # Sigh. :-(
+ :default_return => "Object",
+ :hide_void_return => false,
+ :no_highlight => false,
+ :files => [],
+ :verifier => Verifier.new
+ )
+ @visibilities = [:public]
+ @assets = {}
+ @excluded = []
+ @files = []
+ @hidden_tags = []
+ @use_cache = false
+ @use_yardopts_file = true
+ @use_document_file = true
+ @generate = true
+ @options_file = DEFAULT_YARDOPTS_FILE
+ @statistics = true
+ @list = false
+ @save_yardoc = true
+ @has_markup = false
+
+ if defined?(::Encoding) && ::Encoding.respond_to?(:default_external=)
+ ::Encoding.default_external, ::Encoding.default_internal = 'utf-8', 'utf-8'
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/yard/lib/yard/rake/wikidoc_task.rb b/sdk/ruby-google-api-client/yard/lib/yard/rake/wikidoc_task.rb
new file mode 100644
index 0000000000..573bfb4d32
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/lib/yard/rake/wikidoc_task.rb
@@ -0,0 +1,27 @@
+require 'rake'
+require 'rake/tasklib'
+require 'yard/rake/yardoc_task'
+require 'yard/cli/wiki'
+
+module YARD
+ module Rake
+ # The rake task to run {CLI::Yardoc} and generate documentation.
+ class WikidocTask < YardocTask
+ protected
+
+ # Defines the rake task
+ # @return [void]
+ def define
+ desc "Generate Wiki Documentation with YARD"
+ task(name) do
+ before.call if before.is_a?(Proc)
+ yardoc = YARD::CLI::Wiki.new
+ yardoc.parse_arguments *(options + files)
+ yardoc.options[:verifier] = verifier if verifier
+ yardoc.run
+ after.call if after.is_a?(Proc)
+ end
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/yard/lib/yard/serializers/wiki_serializer.rb b/sdk/ruby-google-api-client/yard/lib/yard/serializers/wiki_serializer.rb
new file mode 100644
index 0000000000..469c4736ed
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/lib/yard/serializers/wiki_serializer.rb
@@ -0,0 +1,68 @@
+# encoding: utf-8
+
+require 'yard/serializers/file_system_serializer'
+
+module YARD
+ module Serializers
+ ##
+ # Subclass required to get correct filename for the top level namespace.
+ # :-(
+ class WikiSerializer < FileSystemSerializer
+ # Post-process the data before serializing.
+ # Strip unnecessary whitespace.
+ # Convert stuff into more wiki-friendly stuff.
+ # FULL OF HACKS!
+ def serialize(object, data)
+ data = data.encode("UTF-8")
+ if object == "Sidebar.wiki"
+ data = data.gsub(/^#sidebar Sidebar\n/, "")
+ end
+ data = data.gsub(/\n\s*\n/, "\n")
+ # ASCII/UTF-8 erb error work-around.
+ data = data.gsub(/--/, "—")
+ data = data.gsub(/——/, "----")
+ data = data.gsub(/----\n----/, "----")
+ # HACK! Google Code Wiki treats <code> blocks like <pre> blocks.
+ data = data.gsub(/\<code\>(.+)\<\/code\>/, "`\\1`")
+ super(object, data)
+ end
+
+ def serialized_path(object)
+ return object if object.is_a?(String)
+
+ if object.is_a?(CodeObjects::ExtraFileObject)
+ fspath = ['file.' + object.name + (extension.empty? ? '' : ".#{extension}")]
+ else
+ # This line is the only change of significance.
+ # Changed from 'top-level-namespace' to 'TopLevelNamespace' to
+ # conform to wiki word page naming convention.
+ objname = object != YARD::Registry.root ? object.name.to_s : "TopLevelNamespace"
+ objname += '_' + object.scope.to_s[0,1] if object.is_a?(CodeObjects::MethodObject)
+ fspath = [objname + (extension.empty? ? '' : ".#{extension}")]
+ if object.namespace && object.namespace.path != ""
+ fspath.unshift(*object.namespace.path.split(CodeObjects::NSEP))
+ end
+ end
+
+ # Don't change the filenames, it just makes it more complicated
+ # to figure out the original name.
+ #fspath.map! do |p|
+ # p.gsub(/([a-z])([A-Z])/, '\1_\2').downcase
+ #end
+
+ # Remove special chars from filenames.
+ # Windows disallows \ / : * ? " < > | but we will just remove any
+ # non alphanumeric (plus period, underscore and dash).
+ fspath.map! do |p|
+ p.gsub(/[^\w\.-]/) do |x|
+ encoded = '_'
+
+ x.each_byte { |b| encoded << ("%X" % b) }
+ encoded
+ end
+ end
+ fspath.join("")
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/yard/lib/yard/templates/helpers/wiki_helper.rb b/sdk/ruby-google-api-client/yard/lib/yard/templates/helpers/wiki_helper.rb
new file mode 100644
index 0000000000..e03dfb6681
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/lib/yard/templates/helpers/wiki_helper.rb
@@ -0,0 +1,502 @@
+require 'cgi'
+require 'rdiscount'
+
+module YARD
+ module Templates::Helpers
+ # The helper module for HTML templates.
+ module WikiHelper
+ include MarkupHelper
+
+ # @return [String] escapes text
+ def h(text)
+ out = ""
+ text = text.split(/\n/)
+ text.each_with_index do |line, i|
+ out <<
+ case line
+ when /^\s*$/; "\n\n"
+ when /^\s+\S/, /^=/; line + "\n"
+ else; line + (text[i + 1] =~ /^\s+\S/ ? "\n" : " ")
+ end
+ end
+ out.strip
+ end
+
+ # @return [String] wraps text at +col+ columns.
+ def wrap(text, col = 72)
+ text.strip.gsub(/(.{1,#{col}})( +|$\n?)|(.{1,#{col}})/, "\\1\\3\n")
+ end
+
+ # Escapes a URL
+ #
+ # @param [String] text the URL
+ # @return [String] the escaped URL
+ def urlencode(text)
+ CGI.escape(text.to_s)
+ end
+
+ def indent(text, len = 2)
+ text.gsub(/^/, ' ' * len)
+ end
+
+ def unindent(text)
+ lines = text.split("\n", -1)
+ min_indent_size = text.size
+ for line in lines
+ indent_size = (line.gsub("\t", " ") =~ /[^\s]/) || text.size
+ min_indent_size = indent_size if indent_size < min_indent_size
+ end
+ text.gsub("\t", " ").gsub(Regexp.new("^" + " " * min_indent_size), '')
+ end
+
+ # @group Converting Markup to HTML
+
+ # Turns text into HTML using +markup+ style formatting.
+ #
+ # @param [String] text the text to format
+ # @param [Symbol] markup examples are +:markdown+, +:textile+, +:rdoc+.
+ # To add a custom markup type, see {MarkupHelper}
+ # @return [String] the HTML
+ def htmlify(text, markup = options[:markup])
+ markup_meth = "html_markup_#{markup}"
+ return text unless respond_to?(markup_meth)
+ return "" unless text
+ return text unless markup
+ load_markup_provider(markup)
+ html = send(markup_meth, text)
+ if html.respond_to?(:encode)
+ html = html.force_encoding(text.encoding) # for libs that mess with encoding
+ html = html.encode(:invalid => :replace, :replace => '?')
+ end
+ html = resolve_links(html)
+ html = html.gsub(/(?:\s*)?(.+?)(?:<\/code>\s*)?<\/pre>/m) do
+ str = unindent($1).strip
+ str = html_syntax_highlight(CGI.unescapeHTML(str)) unless options[:no_highlight]
+ str
+ end unless markup == :text
+ html
+ end
+
+ # Converts Markdown to HTML
+ # @param [String] text input Markdown text
+ # @return [String] output HTML
+ # @since 0.6.0
+ def html_markup_markdown(text)
+ Markdown.new(text).to_html
+ end
+
+ # Converts Textile to HTML
+ # @param [String] text the input Textile text
+ # @return [String] output HTML
+ # @since 0.6.0
+ def html_markup_textile(text)
+ doc = markup_class(:textile).new(text)
+ doc.hard_breaks = false if doc.respond_to?(:hard_breaks=)
+ doc.to_html
+ end
+
+ # Converts plaintext to HTML
+ # @param [String] text the input text
+ # @return [String] the output HTML
+ # @since 0.6.0
+ def html_markup_text(text)
+ "" + text + " "
+ end
+
+ # Converts HTML to HTML
+ # @param [String] text input html
+ # @return [String] output HTML
+ # @since 0.6.0
+ def html_markup_html(text)
+ text
+ end
+
+ # @return [String] HTMLified text as a single line (paragraphs removed)
+ def htmlify_line(*args)
+ htmlify(*args)
+ end
+
+ # Fixes RDoc behaviour with ++ only supporting alphanumeric text.
+ #
+ # @todo Refactor into own SimpleMarkup subclass
+ def fix_typewriter(text)
+ text.gsub(/\+(?! )([^\n\+]{1,900})(?! )\+/) do
+ type_text, pre_text, no_match = $1, $`, $&
+ pre_match = pre_text.scan(%r(?(?:pre|tt|code).*?>))
+ if pre_match.last.nil? || pre_match.last.include?('/')
+ '`' + h(type_text) + '`'
+ else
+ no_match
+ end
+ end
+ end
+
+ # Don't allow -- to turn into — element. The chances of this being
+ # some --option is far more likely than the typographical meaning.
+ #
+ # @todo Refactor into own SimpleMarkup subclass
+ def fix_dash_dash(text)
+ text.gsub(/—(?=\S)/, '--')
+ end
+
+ # @group Syntax Highlighting Source Code
+
+ # Syntax highlights +source+ in language +type+.
+ #
+ # @note To support a specific language +type+, implement the method
+ # +html_syntax_highlight_TYPE+ in this class.
+ #
+ # @param [String] source the source code to highlight
+ # @param [Symbol] type the language type (:ruby, :plain, etc). Use
+ # :plain for no syntax highlighting.
+ # @return [String] the highlighted source
+ def html_syntax_highlight(source, type = nil)
+ return "" unless source
+ return "{{{\n#{source}\n}}}"
+ end
+
+ # @return [String] unhighlighted source
+ def html_syntax_highlight_plain(source)
+ return "" unless source
+ return "{{{\n#{source}\n}}}"
+ end
+
+ # @group Linking Objects and URLs
+
+ # Resolves any text in the form of +{Name}+ to the object specified by
+ # Name. Also supports link titles in the form +{Name title}+.
+ #
+ # @example Linking to an instance method
+ # resolve_links("{MyClass#method}") # => "MyClass#method "
+ # @example Linking to a class with a title
+ # resolve_links("{A::B::C the C class}") # => "the c class "
+ # @param [String] text the text to resolve links in
+ # @return [String] HTML with linkified references
+ def resolve_links(text)
+ code_tags = 0
+ text.gsub(/<(\/)?(pre|code|tt)|\{(\S+?)(?:\s(.*?\S))?\}(?=[\W<]|.+<\/|$)/) do |str|
+ closed, tag, name, title, match = $1, $2, $3, $4, $&
+ if tag
+ code_tags += (closed ? -1 : 1)
+ next str
+ end
+ next str unless code_tags == 0
+
+ next(match) if name[0,1] == '|'
+ if object.is_a?(String)
+ object
+ else
+ link = linkify(name, title)
+ if link == name || link == title
+ match = /(.+)?(\{#{Regexp.quote name}(?:\s.*?)?\})(.+)?/.match(text)
+ file = (@file ? @file : object.file) || '(unknown)'
+ line = (@file ? 1 : (object.docstring.line_range ? object.docstring.line_range.first : 1)) + (match ? $`.count("\n") : 0)
+ log.warn "In file `#{file}':#{line}: Cannot resolve link to #{name} from text" + (match ? ":" : ".")
+ log.warn((match[1] ? '...' : '') + match[2].gsub("\n","") + (match[3] ? '...' : '')) if match
+ end
+
+ link
+ end
+ end
+ end
+
+ def unlink(value)
+ value.gsub(/\b(([A-Z][a-z]+){2,99})\b/, "!\\1")
+ end
+
+ # (see BaseHelper#link_file)
+ def link_file(filename, title = nil, anchor = nil)
+ link_url(url_for_file(filename, anchor), title)
+ end
+
+ # (see BaseHelper#link_include_object)
+ def link_include_object(obj)
+ htmlify(obj.docstring)
+ end
+
+ # (see BaseHelper#link_object)
+ def link_object(obj, otitle = nil, anchor = nil, relative = true)
+ return otitle if obj.nil?
+ obj = Registry.resolve(object, obj, true, true) if obj.is_a?(String)
+ if !otitle && obj.root?
+ title = "Top Level Namespace"
+ elsif otitle
+ # title = "`" + otitle.to_s + "`"
+ title = otitle.to_s
+ elsif object.is_a?(CodeObjects::Base)
+ # title = "`" + h(object.relative_path(obj)) + "`"
+ title = h(object.relative_path(obj))
+ else
+ # title = "`" + h(obj.to_s) + "`"
+ title = h(obj.to_s)
+ end
+ unless serializer
+ return unlink(title)
+ end
+ return unlink(title) if obj.is_a?(CodeObjects::Proxy)
+
+ link = url_for(obj, anchor, relative)
+ if link
+ link_url(link, title, :formatted => false)
+ else
+ unlink(title)
+ end
+ end
+
+ # (see BaseHelper#link_url)
+ def link_url(url, title = nil, params = {})
+ title ||= url
+ if url.to_s == ""
+ title
+ else
+ if params[:formatted]
+ "#{title} "
+ else
+ "[#{url} #{title}]"
+ end
+ end
+ end
+
+ # @group URL Helpers
+
+ # @param [CodeObjects::Base] object the object to get an anchor for
+ # @return [String] the anchor for a specific object
+ def anchor_for(object)
+ # Method:_Google::APIClient#execute!
+ case object
+ when CodeObjects::MethodObject
+ if object.scope == :instance
+ "Method:_#{object.path}"
+ elsif object.scope == :class
+ "Method:_#{object.path}"
+ end
+ when CodeObjects::ClassVariableObject
+ "#{object.name.to_s.gsub('@@', '')}-#{object.type}"
+ when CodeObjects::Base
+ "#{object.name}-#{object.type}"
+ when CodeObjects::Proxy
+ object.path
+ else
+ object.to_s
+ end
+ end
+
+ # Returns the URL for an object.
+ #
+ # @param [String, CodeObjects::Base] obj the object (or object path) to link to
+ # @param [String] anchor the anchor to link to
+ # @param [Boolean] relative use a relative or absolute link
+ # @return [String] the URL location of the object
+ def url_for(obj, anchor = nil, relative = true)
+ link = nil
+ return link unless serializer
+ if obj.kind_of?(CodeObjects::Base) && obj.root?
+ return 'TopLevelNamespace'
+ end
+
+ if obj.is_a?(CodeObjects::Base) && !obj.is_a?(CodeObjects::NamespaceObject)
+ # If the obj is not a namespace obj make it the anchor.
+ anchor, obj = obj, obj.namespace
+ end
+
+ objpath = serializer.serialized_path(obj)
+ return link unless objpath
+
+ if relative
+ fromobj = object
+ if object.is_a?(CodeObjects::Base) &&
+ !object.is_a?(CodeObjects::NamespaceObject)
+ fromobj = fromobj.namespace
+ end
+
+ from = serializer.serialized_path(fromobj)
+ link = File.relative_path(from, objpath)
+ else
+ link = objpath
+ end
+
+ return (
+ link.gsub(/\.html$/, '').gsub(/\.wiki$/, '') +
+ (anchor ? '#' + urlencode(anchor_for(anchor)) : '')
+ )
+ end
+
+ # Returns the URL for a specific file
+ #
+ # @param [String] filename the filename to link to
+ # @param [String] anchor optional anchor
+ # @return [String] the URL pointing to the file
+ def url_for_file(filename, anchor = nil)
+ fromobj = object
+ if CodeObjects::Base === fromobj && !fromobj.is_a?(CodeObjects::NamespaceObject)
+ fromobj = fromobj.namespace
+ end
+ from = serializer.serialized_path(fromobj)
+ if filename == options[:readme]
+ filename = 'Documentation'
+ else
+ filename = File.basename(filename).gsub(/\.[^.]+$/, '').capitalize
+ end
+ link = File.relative_path(from, filename)
+ return (
+ link.gsub(/\.html$/, '').gsub(/\.wiki$/, '') +
+ (anchor ? '#' + urlencode(anchor) : '')
+ )
+ end
+
+ # @group Formatting Objects and Attributes
+
+ # Formats a list of objects and links them
+ # @return [String] a formatted list of objects
+ def format_object_name_list(objects)
+ objects.sort_by {|o| o.name.to_s.downcase }.map do |o|
+ "" + linkify(o, o.name) + " "
+ end.join(", ")
+ end
+
+ # Formats a list of types from a tag.
+ #
+ # @param [Array, FalseClass] typelist
+ # the list of types to be formatted.
+ #
+ # @param [Boolean] brackets omits the surrounding
+ # brackets if +brackets+ is set to +false+.
+ #
+ # @return [String] the list of types formatted
+ # as [Type1, Type2, ...] with the types linked
+ # to their respective descriptions.
+ #
+ def format_types(typelist, brackets = true)
+ return unless typelist.is_a?(Array)
+ list = typelist.map do |type|
+ type = type.gsub(/([<>])/) { h($1) }
+ type = type.gsub(/([\w:]+)/) do
+ $1 == "lt" || $1 == "gt" ? "`#{$1}`" : linkify($1, $1)
+ end
+ end
+ list.empty? ? "" : (brackets ? "(#{list.join(", ")})" : list.join(", "))
+ end
+
+ # Get the return types for a method signature.
+ #
+ # @param [CodeObjects::MethodObject] meth the method object
+ # @param [Boolean] link whether to link the types
+ # @return [String] the signature types
+ # @since 0.5.3
+ def signature_types(meth, link = true)
+ meth = convert_method_to_overload(meth)
+
+ type = options[:default_return] || ""
+ if meth.tag(:return) && meth.tag(:return).types
+ types = meth.tags(:return).map {|t| t.types ? t.types : [] }.flatten.uniq
+ first = link ? h(types.first) : format_types([types.first], false)
+ if types.size == 2 && types.last == 'nil'
+ type = first + '? '
+ elsif types.size == 2 && types.last =~ /^(Array)?<#{Regexp.quote types.first}>$/
+ type = first + '+ '
+ elsif types.size > 2
+ type = [first, '...'].join(', ')
+ elsif types == ['void'] && options[:hide_void_return]
+ type = ""
+ else
+ type = link ? h(types.join(", ")) : format_types(types, false)
+ end
+ elsif !type.empty?
+ type = link ? h(type) : format_types([type], false)
+ end
+ type = "(#{type.to_s.strip}) " unless type.empty?
+ type
+ end
+
+ # Formats the signature of method +meth+.
+ #
+ # @param [CodeObjects::MethodObject] meth the method object to list
+ # the signature of
+ # @param [Boolean] link whether to link the method signature to the details view
+ # @param [Boolean] show_extras whether to show extra meta-data (visibility, attribute info)
+ # @param [Boolean] full_attr_name whether to show the full attribute name
+ # ("name=" instead of "name")
+ # @return [String] the formatted method signature
+ def signature(meth, link = true, show_extras = true, full_attr_name = true)
+ meth = convert_method_to_overload(meth)
+
+ type = signature_types(meth, link)
+ name = full_attr_name ? meth.name : meth.name.to_s.gsub(/^(\w+)=$/, '\1')
+ blk = format_block(meth)
+ args = !full_attr_name && meth.writer? ? "" : format_args(meth)
+ extras = []
+ extras_text = ''
+ if show_extras
+ if rw = meth.attr_info
+ attname = [rw[:read] ? 'read' : nil, rw[:write] ? 'write' : nil].compact
+ attname = attname.size == 1 ? attname.join('') + 'only' : nil
+ extras << attname if attname
+ end
+ extras << meth.visibility if meth.visibility != :public
+ extras_text = ' ' unless extras.empty?
+ end
+ title = "%s *`%s`* `%s` `%s`" % [type, h(name.to_s).strip, args, blk]
+ title.gsub!(//, "")
+ title.gsub!(/<\/tt>/, "")
+ title.gsub!(/`\s*`/, "")
+ title.strip!
+ if link
+ if meth.is_a?(YARD::CodeObjects::MethodObject)
+ link_title =
+ "#{h meth.name(true)} (#{meth.scope} #{meth.type})"
+ else
+ link_title = "#{h name} (#{meth.type})"
+ end
+ # This has to be raw HTML, can't wiki-format a link title otherwise.
+ "#{title} #{extras_text}"
+ else
+ title + extras_text
+ end
+ end
+
+ # @group Getting the Character Encoding
+
+ # Returns the current character set. The default value can be overridden
+ # by setting the +LANG+ environment variable or by overriding this
+ # method. In Ruby 1.9 you can also modify this value by setting
+ # +Encoding.default_external+.
+ #
+ # @return [String] the current character set
+ # @since 0.5.4
+ def charset
+ return 'utf-8' unless RUBY19 || lang = ENV['LANG']
+ if RUBY19
+ lang = Encoding.default_external.name.downcase
+ else
+ lang = lang.downcase.split('.').last
+ end
+ case lang
+ when "ascii-8bit", "us-ascii", "ascii-7bit"; 'iso-8859-1'
+ else; lang
+ end
+ end
+
+ # @endgroup
+
+ private
+
+ # Converts a set of hash options into HTML attributes for a tag
+ #
+ # @param [Hash{String => String}] opts the tag options
+ # @return [String] the tag attributes of an HTML tag
+ def tag_attrs(opts = {})
+ opts.sort_by {|k, v| k.to_s }.map {|k,v| "#{k}=#{v.to_s.inspect}" if v }.join(" ")
+ end
+
+ # Converts a {CodeObjects::MethodObject} into an overload object
+ # @since 0.5.3
+ def convert_method_to_overload(meth)
+ # use first overload tag if it has a return type and method itself does not
+ if !meth.tag(:return) && meth.tags(:overload).size == 1 && meth.tag(:overload).tag(:return)
+ return meth.tag(:overload)
+ end
+ meth
+ end
+ end
+ end
+end
diff --git a/sdk/ruby-google-api-client/yard/templates/default/class/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/class/setup.rb
new file mode 100644
index 0000000000..0b4dc12f82
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/class/setup.rb
@@ -0,0 +1,43 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+include T('default/module')
+
+def init
+ super
+ sections.place(:subclasses).before(:children)
+ sections.place(:constructor_details, [T('method_details')]).before(:methodmissing)
+ # Weird bug w/ doubled sections
+ sections.uniq!
+end
+
+def constructor_details
+ ctors = object.meths(:inherited => true, :included => true)
+ return unless @ctor = ctors.find {|o| o.name == :initialize }
+ return if prune_method_listing([@ctor]).empty?
+ erb(:constructor_details)
+end
+
+def subclasses
+ return if object.path == "Object" # don't show subclasses for Object
+ unless globals.subclasses
+ globals.subclasses = {}
+ list = run_verifier Registry.all(:class)
+ list.each do |o|
+ (globals.subclasses[o.superclass.path] ||= []) << o if o.superclass
+ end
+ end
+
+ @subclasses = globals.subclasses[object.path]
+ return if @subclasses.nil? || @subclasses.empty?
+ @subclasses = @subclasses.sort_by {|o| o.path }.map do |child|
+ name = child.path
+ if object.namespace
+ name = object.relative_path(child)
+ end
+ [name, child]
+ end
+ erb(:subclasses)
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/yard/templates/default/docstring/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/docstring/setup.rb
new file mode 100644
index 0000000000..63a5877fb1
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/docstring/setup.rb
@@ -0,0 +1,54 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+def init
+ return if object.docstring.blank? && !object.has_tag?(:api)
+ sections :index, [:private, :deprecated, :abstract, :todo, :note, :returns_void, :text], T('tags')
+end
+
+def private
+ return unless object.has_tag?(:api) && object.tag(:api).text == 'private'
+ erb(:private)
+end
+
+def abstract
+ return unless object.has_tag?(:abstract)
+ erb(:abstract)
+end
+
+def deprecated
+ return unless object.has_tag?(:deprecated)
+ erb(:deprecated)
+end
+
+def todo
+ return unless object.has_tag?(:todo)
+ erb(:todo)
+end
+
+def note
+ return unless object.has_tag?(:note)
+ erb(:note)
+end
+
+def returns_void
+ return unless object.type == :method
+ return if object.name == :initialize && object.scope == :instance
+ return unless object.tags(:return).size == 1 && object.tag(:return).types == ['void']
+ erb(:returns_void)
+end
+
+def docstring_text
+ text = ""
+ unless object.tags(:overload).size == 1 && object.docstring.empty?
+ text = object.docstring
+ end
+
+ if text.strip.empty? && object.tags(:return).size == 1 && object.tag(:return).text
+ text = object.tag(:return).text.gsub(/\A([a-z])/) {|x| x.upcase }
+ end
+
+ text.strip
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/yard/templates/default/method/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/method/setup.rb
new file mode 100644
index 0000000000..a6ed299243
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/method/setup.rb
@@ -0,0 +1,8 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+def init
+ sections :header, [T('method_details')]
+end
\ No newline at end of file
diff --git a/sdk/ruby-google-api-client/yard/templates/default/method_details/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/method_details/setup.rb
new file mode 100644
index 0000000000..e3bfea0030
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/method_details/setup.rb
@@ -0,0 +1,8 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+def init
+ sections :header, [:method_signature, T('docstring')]
+end
diff --git a/sdk/ruby-google-api-client/yard/templates/default/module/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/module/setup.rb
new file mode 100644
index 0000000000..d2559eaa42
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/module/setup.rb
@@ -0,0 +1,133 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+include Helpers::ModuleHelper
+
+def init
+ sections :header, :box_info, :pre_docstring, T('docstring'), :children,
+ :constant_summary, [T('docstring')], :inherited_constants,
+ :inherited_methods,
+ :methodmissing, [T('method_details')],
+ :attribute_details, [T('method_details')],
+ :method_details_list, [T('method_details')]
+end
+
+def pre_docstring
+ return if object.docstring.blank?
+ erb(:pre_docstring)
+end
+
+def children
+ @inner = [[:modules, []], [:classes, []]]
+ object.children.each do |child|
+ @inner[0][1] << child if child.type == :module
+ @inner[1][1] << child if child.type == :class
+ end
+ @inner.map! {|v| [v[0], run_verifier(v[1].sort_by {|o| o.name.to_s })] }
+ return if (@inner[0][1].size + @inner[1][1].size) == 0
+ erb(:children)
+end
+
+def methodmissing
+ mms = object.meths(:inherited => true, :included => true)
+ return unless @mm = mms.find {|o| o.name == :method_missing && o.scope == :instance }
+ erb(:methodmissing)
+end
+
+def method_listing(include_specials = true)
+ return @smeths ||= method_listing.reject {|o| special_method?(o) } unless include_specials
+ return @meths if @meths
+ @meths = object.meths(:inherited => false, :included => false)
+ @meths = sort_listing(prune_method_listing(@meths))
+ @meths
+end
+
+def special_method?(meth)
+ return true if meth.name(true) == '#method_missing'
+ return true if meth.constructor?
+ false
+end
+
+def attr_listing
+ return @attrs if @attrs
+ @attrs = []
+ [:class, :instance].each do |scope|
+ object.attributes[scope].each do |name, rw|
+ @attrs << (rw[:read] || rw[:write])
+ end
+ end
+ @attrs = sort_listing(prune_method_listing(@attrs, false))
+end
+
+def constant_listing
+ return @constants if @constants
+ @constants = object.constants(:included => false, :inherited => false)
+ @constants += object.cvars
+ @constants = run_verifier(@constants)
+ @constants
+end
+
+def sort_listing(list)
+ list.sort_by {|o| [o.scope.to_s, o.name.to_s.downcase] }
+end
+
+def docstring_full(obj)
+ docstring = ""
+ if obj.tags(:overload).size == 1 && obj.docstring.empty?
+ docstring = obj.tag(:overload).docstring
+ else
+ docstring = obj.docstring
+ end
+
+ if docstring.summary.empty? && obj.tags(:return).size == 1 && obj.tag(:return).text
+ docstring = Docstring.new(obj.tag(:return).text.gsub(/\A([a-z])/) {|x| x.upcase }.strip)
+ end
+
+ docstring
+end
+
+def docstring_summary(obj)
+ docstring_full(obj).summary
+end
+
+def groups(list, type = "Method")
+ if groups_data = object.groups
+ others = list.select {|m| !m.group }
+ groups_data.each do |name|
+ items = list.select {|m| m.group == name }
+ yield(items, name) unless items.empty?
+ end
+ else
+ others = []
+ group_data = {}
+ list.each do |meth|
+ if meth.group
+ (group_data[meth.group] ||= []) << meth
+ else
+ others << meth
+ end
+ end
+ group_data.each {|group, items| yield(items, group) unless items.empty? }
+ end
+
+ scopes(others) {|items, scope| yield(items, "#{scope.to_s.capitalize} #{type} Summary") }
+end
+
+def scopes(list)
+ [:class, :instance].each do |scope|
+ items = list.select {|m| m.scope == scope }
+ yield(items, scope) unless items.empty?
+ end
+end
+
+def mixed_into(object)
+ unless globals.mixed_into
+ globals.mixed_into = {}
+ list = run_verifier Registry.all(:class, :module)
+ list.each {|o| o.mixins.each {|m| (globals.mixed_into[m.path] ||= []) << o } }
+ end
+
+ globals.mixed_into[object.path] || []
+end
diff --git a/sdk/ruby-google-api-client/yard/templates/default/tags/setup.rb b/sdk/ruby-google-api-client/yard/templates/default/tags/setup.rb
new file mode 100644
index 0000000000..33dc42cacc
--- /dev/null
+++ b/sdk/ruby-google-api-client/yard/templates/default/tags/setup.rb
@@ -0,0 +1,55 @@
+lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+require 'yard-google-code'
+
+def init
+ tags = Tags::Library.visible_tags - [:abstract, :deprecated, :note, :todo]
+ create_tag_methods(tags - [:example, :option, :overload, :see])
+ sections :index, tags
+ sections.any(:overload).push(T('docstring'))
+end
+
+def return
+ if object.type == :method
+ return if object.name == :initialize && object.scope == :instance
+ return if object.tags(:return).size == 1 && object.tag(:return).types == ['void']
+ end
+ tag(:return)
+end
+
+private
+
+def tag(name, opts = nil)
+ return unless object.has_tag?(name)
+ opts ||= options_for_tag(name)
+ @no_names = true if opts[:no_names]
+ @no_types = true if opts[:no_types]
+ @name = name
+ out = erb('tag')
+ @no_names, @no_types = nil, nil
+ out
+end
+
+def create_tag_methods(tags)
+ tags.each do |tag|
+ next if respond_to?(tag)
+ instance_eval(<<-eof, __FILE__, __LINE__ + 1)
+ def #{tag}; tag(#{tag.inspect}) end
+ eof
+ end
+end
+
+def options_for_tag(tag)
+ opts = {:no_types => true, :no_names => true}
+ case Tags::Library.factory_method_for(tag)
+ when :with_types
+ opts[:no_types] = false
+ when :with_types_and_name
+ opts[:no_types] = false
+ opts[:no_names] = false
+ when :with_name
+ opts[:no_names] = false
+ end
+ opts
+end
diff --git a/sdk/ruby/Gemfile b/sdk/ruby/Gemfile
index 1972df614e..ca9ab24d78 100644
--- a/sdk/ruby/Gemfile
+++ b/sdk/ruby/Gemfile
@@ -5,6 +5,5 @@
source 'https://rubygems.org'
gemspec
gem 'rake'
-gem 'minitest', '>= 5.0.0'
-gem 'mocha', require: false
-gem 'signet', '<= 0.11'
+gem 'minitest', '>= 5'
+gem 'mocha', '>= 2.1', require: false
diff --git a/sdk/ruby/arvados.gemspec b/sdk/ruby/arvados.gemspec
index b196a1c33e..ea5ff8c7c5 100644
--- a/sdk/ruby/arvados.gemspec
+++ b/sdk/ruby/arvados.gemspec
@@ -37,18 +37,15 @@ Gem::Specification.new do |s|
s.files = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
"lib/arvados/collection.rb", "lib/arvados/keep.rb",
"README", "LICENSE-2.0.txt"]
- s.required_ruby_version = '>= 1.8.7'
+ s.required_ruby_version = '>= 2.7.0'
s.add_dependency('activesupport', '>= 3')
s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
- # Our google-api-client dependency used to be < 0.9, but that could be
- # satisfied by the buggy 0.9.pre*, cf. https://dev.arvados.org/issues/9213
- # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
- s.add_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
+ # arvados fork of google-api-client gem with old API and new
+ # compatibility fixes, built from ../ruby-google-api-client/
+ s.add_dependency('arvados-google-api-client', '>= 0.8.7.5', '< 0.8.8')
# work around undeclared dependency on i18n in some activesupport 3.x.x:
- s.add_dependency('i18n', '~> 0')
+ s.add_dependency('i18n')
s.add_dependency('json', '>= 1.7.7', '<3')
- # Avoid warning on Ruby 2.7, cf. https://dev.arvados.org/issues/18247
- s.add_dependency('faraday', '>= 0.17.4')
s.add_runtime_dependency('jwt', '<2', '>= 0.1.5')
s.homepage =
'https://arvados.org'
diff --git a/sdk/ruby/lib/arvados.rb b/sdk/ruby/lib/arvados.rb
index a72a9f8241..63550cd37c 100644
--- a/sdk/ruby/lib/arvados.rb
+++ b/sdk/ruby/lib/arvados.rb
@@ -7,6 +7,7 @@ require 'active_support/inflector'
require 'json'
require 'fileutils'
require 'andand'
+require 'net/http'
require 'arvados/google_api_client'
@@ -20,7 +21,7 @@ class Arvados
attr_reader :request_id
def execute(*args)
- @request_id = "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
+ @request_id = "req-" + Random.new.rand(2**128).to_s(36)[0..19]
if args.last.is_a? Hash
args.last[:headers] ||= {}
args.last[:headers]['X-Request-Id'] = @request_id
@@ -75,7 +76,7 @@ class Arvados
_arvados = self
namespace_class = Arvados.const_set "A#{self.object_id}", Class.new
self.arvados_api.schemas.each do |classname, schema|
- next if classname.match /List$/
+ next if classname.match(/List$/)
klass = Class.new(Arvados::Model) do
def self.arvados
@arvados
@@ -136,7 +137,7 @@ class Arvados
end
def debuglog *args
- self.class.debuglog *args
+ self.class.debuglog(*args)
end
def config(config_file_path="~/.config/arvados/settings.conf")
@@ -188,6 +189,15 @@ class Arvados
@config = config
end
+ def cluster_config
+ return @cluster_config if @cluster_config
+
+ uri = URI("https://#{config()["ARVADOS_API_HOST"]}/arvados/v1/config")
+ cc = JSON.parse(Net::HTTP.get(uri))
+
+ @cluster_config = cc
+ end
+
class Model
def self.arvados_api
arvados.arvados_api
@@ -196,10 +206,10 @@ class Arvados
arvados.client
end
def self.debuglog(*args)
- arvados.class.debuglog *args
+ arvados.class.debuglog(*args)
end
def debuglog(*args)
- self.class.arvados.class.debuglog *args
+ self.class.arvados.class.debuglog(*args)
end
def self.api_exec(method, parameters={})
api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
diff --git a/sdk/ruby/lib/arvados/keep.rb b/sdk/ruby/lib/arvados/keep.rb
index 458af53a74..e391b7a6ca 100644
--- a/sdk/ruby/lib/arvados/keep.rb
+++ b/sdk/ruby/lib/arvados/keep.rb
@@ -248,7 +248,7 @@ module Keep
end
# Verify that a given manifest is valid according to
- # https://arvados.org/projects/arvados/wiki/Keep_manifest_format
+ # https://dev.arvados.org/projects/arvados/wiki/Keep_manifest_format
def self.validate! manifest
raise ArgumentError.new "No manifest found" if !manifest
diff --git a/sdk/ruby/test/sdk_fixtures.rb b/sdk/ruby/test/sdk_fixtures.rb
index 0f385e2218..28f12b0b02 100644
--- a/sdk/ruby/test/sdk_fixtures.rb
+++ b/sdk/ruby/test/sdk_fixtures.rb
@@ -33,7 +33,7 @@ module SDKFixtures
file = IO.read(path)
trim_index = file.index('# Test Helper trims the rest of the file')
file = file[0, trim_index] if trim_index
- YAML.load(file)
+ YAML.safe_load(file, permitted_classes: [Time])
end
end
diff --git a/sdk/ruby/test/test_keep_manifest.rb b/sdk/ruby/test/test_keep_manifest.rb
index eee8b39699..ff0cab6ef9 100644
--- a/sdk/ruby/test/test_keep_manifest.rb
+++ b/sdk/ruby/test/test_keep_manifest.rb
@@ -357,8 +357,6 @@ class ManifestTest < Minitest::Test
"invalid file token \"0:0:a/./bc.txt\""],
[false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:a/../bc.txt\n",
"invalid file token \"0:0:a/../bc.txt\""],
- [false, "./abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
- "invalid stream name \"./abc/./foo\""],
[false, "d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n",
"invalid stream name \"d41d8cd98f00b204e9800998ecf8427e+0\""],
[false, ". d41d8cd98f00b204e9800998ecf8427 0:0:abc.txt\n",
diff --git a/sdk/ruby/test/test_request_id.rb b/sdk/ruby/test/test_request_id.rb
index 2e25210ba7..641b442751 100644
--- a/sdk/ruby/test/test_request_id.rb
+++ b/sdk/ruby/test/test_request_id.rb
@@ -17,6 +17,6 @@ class RequestIdTest < Minitest::Test
arv.collection.get(uuid: "zzzzz-4zz18-zzzzzzzzzzzzzzz")
end
assert clnt.request_id != nil
- assert_match /Uh-oh.*\(Request ID: req-[0-9a-zA-Z]{20}\)/, err.message
+ assert_match(/Uh-oh.*\(Request ID: req-[0-9a-zA-Z]{20}\)/, err.message)
end
-end
\ No newline at end of file
+end
diff --git a/services/api/Gemfile b/services/api/Gemfile
index 9b401cc6ac..9cc5f1b7bc 100644
--- a/services/api/Gemfile
+++ b/services/api/Gemfile
@@ -4,25 +4,19 @@
source 'https://rubygems.org'
-gem 'rails', '~> 5.2.0'
-gem 'responders', '~> 2.0'
-
-# Pin sprockets to < 4.0 to avoid issues when upgrading rails to 5.2
-# See: https://github.com/rails/sprockets-rails/issues/443
-gem 'sprockets', '~> 3.0'
+gem 'rails', '~> 7.0.0'
+gem 'responders'
+gem 'i18n'
+gem 'sprockets-rails'
group :test, :development do
gem 'factory_bot_rails'
-
- # As of now (2019-03-27) There's an open issue about incompatibilities with
- # newer versions of this gem: https://github.com/rails/rails-perftest/issues/38
- gem 'ruby-prof', '~> 0.15.0'
-
+ gem 'ruby-prof'
# Note: "require: false" here tells bunder not to automatically
# 'require' the packages during application startup. Installation is
# still mandatory.
- gem 'test-unit', '~> 3.0', require: false
- gem 'simplecov', '~> 0.7.1', require: false
+ gem 'test-unit', require: false
+ gem 'simplecov', require: false
gem 'simplecov-rcov', require: false
gem 'mocha', require: false
gem 'byebug'
@@ -49,12 +43,9 @@ gem 'optimist'
gem 'themes_for_rails', git: 'https://github.com/arvados/themes_for_rails'
-# Import arvados gem.
-gem 'arvados', '~> 2.1.5'
+gem 'arvados', '~> 2.7.0.rc1'
gem 'httpclient'
-gem 'sshkey'
-gem 'safe_yaml'
gem 'lograge'
gem 'logstash-event'
@@ -63,9 +54,9 @@ gem 'rails-observers'
gem 'rails-perftest'
gem 'rails-controller-testing'
-# arvados-google-api-client and googleauth depend on signet, but
-# signet 0.12 is incompatible with ruby 2.3.
-gem 'signet', '< 0.12'
+gem 'webrick'
+
+gem 'mini_portile2', '~> 2.8', '>= 2.8.1'
# Install any plugin gems
Dir.glob(File.join(File.dirname(__FILE__), 'lib', '**', "Gemfile")) do |f|
diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock
index 6bc53be4f8..8b1bbdfc87 100644
--- a/services/api/Gemfile.lock
+++ b/services/api/Gemfile.lock
@@ -8,225 +8,280 @@ GIT
GEM
remote: https://rubygems.org/
specs:
- actioncable (5.2.8.1)
- actionpack (= 5.2.8.1)
+ actioncable (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
nio4r (~> 2.0)
websocket-driver (>= 0.6.1)
- actionmailer (5.2.8.1)
- actionpack (= 5.2.8.1)
- actionview (= 5.2.8.1)
- activejob (= 5.2.8.1)
+ actionmailbox (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ activejob (= 7.0.8.1)
+ activerecord (= 7.0.8.1)
+ activestorage (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ mail (>= 2.7.1)
+ net-imap
+ net-pop
+ net-smtp
+ actionmailer (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ actionview (= 7.0.8.1)
+ activejob (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
mail (~> 2.5, >= 2.5.4)
+ net-imap
+ net-pop
+ net-smtp
rails-dom-testing (~> 2.0)
- actionpack (5.2.8.1)
- actionview (= 5.2.8.1)
- activesupport (= 5.2.8.1)
- rack (~> 2.0, >= 2.0.8)
+ actionpack (7.0.8.1)
+ actionview (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ rack (~> 2.0, >= 2.2.4)
rack-test (>= 0.6.3)
rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.0, >= 1.0.2)
- actionview (5.2.8.1)
- activesupport (= 5.2.8.1)
+ rails-html-sanitizer (~> 1.0, >= 1.2.0)
+ actiontext (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ activerecord (= 7.0.8.1)
+ activestorage (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ globalid (>= 0.6.0)
+ nokogiri (>= 1.8.5)
+ actionview (7.0.8.1)
+ activesupport (= 7.0.8.1)
builder (~> 3.1)
erubi (~> 1.4)
rails-dom-testing (~> 2.0)
- rails-html-sanitizer (~> 1.0, >= 1.0.3)
- activejob (5.2.8.1)
- activesupport (= 5.2.8.1)
+ rails-html-sanitizer (~> 1.1, >= 1.2.0)
+ activejob (7.0.8.1)
+ activesupport (= 7.0.8.1)
globalid (>= 0.3.6)
- activemodel (5.2.8.1)
- activesupport (= 5.2.8.1)
- activerecord (5.2.8.1)
- activemodel (= 5.2.8.1)
- activesupport (= 5.2.8.1)
- arel (>= 9.0)
- activestorage (5.2.8.1)
- actionpack (= 5.2.8.1)
- activerecord (= 5.2.8.1)
- marcel (~> 1.0.0)
- activesupport (5.2.8.1)
+ activemodel (7.0.8.1)
+ activesupport (= 7.0.8.1)
+ activerecord (7.0.8.1)
+ activemodel (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ activestorage (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ activejob (= 7.0.8.1)
+ activerecord (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ marcel (~> 1.0)
+ mini_mime (>= 1.1.0)
+ activesupport (7.0.8.1)
concurrent-ruby (~> 1.0, >= 1.0.2)
- i18n (>= 0.7, < 2)
- minitest (~> 5.1)
- tzinfo (~> 1.1)
+ i18n (>= 1.6, < 2)
+ minitest (>= 5.1)
+ tzinfo (~> 2.0)
acts_as_api (1.0.1)
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
- addressable (2.8.0)
- public_suffix (>= 2.0.2, < 5.0)
+ addressable (2.8.6)
+ public_suffix (>= 2.0.2, < 6.0)
andand (1.3.3)
- arel (9.0.0)
- arvados (2.1.5)
+ arvados (2.7.0.rc2)
activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
- arvados-google-api-client (>= 0.7, < 0.8.9)
- faraday (< 0.16)
- i18n (~> 0)
+ arvados-google-api-client (>= 0.8.7.5, < 0.8.8)
+ i18n
json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
- arvados-google-api-client (0.8.7.4)
- activesupport (>= 3.2, < 5.3)
+ arvados-google-api-client (0.8.7.6)
+ activesupport (>= 3.2, < 8.0)
addressable (~> 2.3)
autoparse (~> 0.3)
extlib (~> 0.9)
- faraday (~> 0.9)
- googleauth (~> 0.3)
+ faraday (~> 2.8.0)
+ faraday-gzip (~> 2.0)
+ faraday-multipart (~> 1.0)
+ googleauth (~> 1.0)
launchy (~> 2.4)
multi_json (~> 1.10)
retriable (~> 1.4)
- signet (~> 0.6)
+ signet (~> 0.16.0)
autoparse (0.3.3)
addressable (>= 2.3.1)
extlib (>= 0.9.15)
multi_json (>= 1.0.0)
+ base64 (0.2.0)
builder (3.2.4)
- byebug (11.0.1)
- concurrent-ruby (1.1.10)
+ byebug (11.1.3)
+ concurrent-ruby (1.2.3)
crass (1.0.6)
- erubi (1.10.0)
+ date (3.3.4)
+ docile (1.4.0)
+ erubi (1.12.0)
extlib (0.9.16)
- factory_bot (5.0.2)
- activesupport (>= 4.2.0)
- factory_bot_rails (5.0.1)
- factory_bot (~> 5.0.0)
- railties (>= 4.2.0)
- faraday (0.15.4)
- multipart-post (>= 1.2, < 3)
- ffi (1.9.25)
- globalid (1.0.0)
- activesupport (>= 5.0)
- googleauth (0.9.0)
- faraday (~> 0.12)
+ factory_bot (6.2.1)
+ activesupport (>= 5.0.0)
+ factory_bot_rails (6.2.0)
+ factory_bot (~> 6.2.0)
+ railties (>= 5.0.0)
+ faraday (2.8.1)
+ base64
+ faraday-net_http (>= 2.0, < 3.1)
+ ruby2_keywords (>= 0.0.4)
+ faraday-gzip (2.0.1)
+ faraday (>= 1.0)
+ zlib (~> 3.0)
+ faraday-multipart (1.0.4)
+ multipart-post (~> 2)
+ faraday-net_http (3.0.2)
+ ffi (1.15.5)
+ globalid (1.2.1)
+ activesupport (>= 6.1)
+ google-cloud-env (2.1.1)
+ faraday (>= 1.0, < 3.a)
+ googleauth (1.9.2)
+ faraday (>= 1.0, < 3.a)
+ google-cloud-env (~> 2.1)
jwt (>= 1.4, < 3.0)
- memoist (~> 0.16)
multi_json (~> 1.11)
os (>= 0.9, < 2.0)
- signet (~> 0.7)
+ signet (>= 0.16, < 2.a)
httpclient (2.8.3)
- i18n (0.9.5)
+ i18n (1.14.4)
concurrent-ruby (~> 1.0)
- jquery-rails (4.3.3)
+ jquery-rails (4.6.0)
rails-dom-testing (>= 1, < 3)
railties (>= 4.2.0)
thor (>= 0.14, < 2.0)
- json (2.5.1)
+ json (2.6.3)
jwt (1.5.6)
- launchy (2.5.0)
- addressable (~> 2.7)
- listen (3.2.1)
+ launchy (2.5.2)
+ addressable (~> 2.8)
+ listen (3.8.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
- lograge (0.10.0)
+ lograge (0.13.0)
actionpack (>= 4)
activesupport (>= 4)
railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.18.0)
+ loofah (2.22.0)
crass (~> 1.0.2)
- nokogiri (>= 1.5.9)
- mail (2.7.1)
+ nokogiri (>= 1.12.0)
+ mail (2.8.1)
mini_mime (>= 0.1.1)
- marcel (1.0.2)
- memoist (0.16.2)
- metaclass (0.0.4)
+ net-imap
+ net-pop
+ net-smtp
+ marcel (1.0.4)
method_source (1.0.0)
- mini_mime (1.1.2)
- mini_portile2 (2.8.0)
+ mini_mime (1.1.5)
+ mini_portile2 (2.8.5)
minitest (5.10.3)
- mocha (1.8.0)
- metaclass (~> 0.0.1)
+ mocha (2.1.0)
+ ruby2_keywords (>= 0.0.5)
multi_json (1.15.0)
- multipart-post (2.1.1)
- nio4r (2.5.8)
- nokogiri (1.13.7)
- mini_portile2 (~> 2.8.0)
+ multipart-post (2.4.0)
+ net-imap (0.3.7)
+ date
+ net-protocol
+ net-pop (0.1.2)
+ net-protocol
+ net-protocol (0.2.2)
+ timeout
+ net-smtp (0.5.0)
+ net-protocol
+ nio4r (2.7.1)
+ nokogiri (1.15.6)
+ mini_portile2 (~> 2.8.2)
racc (~> 1.4)
- oj (3.9.2)
- optimist (3.0.0)
- os (1.1.1)
- passenger (6.0.2)
+ oj (3.16.1)
+ optimist (3.1.0)
+ os (1.1.4)
+ passenger (6.0.18)
rack
rake (>= 0.8.1)
- pg (1.1.4)
- power_assert (1.1.4)
- public_suffix (4.0.6)
- racc (1.6.0)
- rack (2.2.4)
- rack-test (2.0.2)
+ pg (1.5.4)
+ power_assert (2.0.3)
+ public_suffix (5.0.4)
+ racc (1.7.3)
+ rack (2.2.9)
+ rack-test (2.1.0)
rack (>= 1.3)
- rails (5.2.8.1)
- actioncable (= 5.2.8.1)
- actionmailer (= 5.2.8.1)
- actionpack (= 5.2.8.1)
- actionview (= 5.2.8.1)
- activejob (= 5.2.8.1)
- activemodel (= 5.2.8.1)
- activerecord (= 5.2.8.1)
- activestorage (= 5.2.8.1)
- activesupport (= 5.2.8.1)
- bundler (>= 1.3.0)
- railties (= 5.2.8.1)
- sprockets-rails (>= 2.0.0)
- rails-controller-testing (1.0.4)
- actionpack (>= 5.0.1.x)
- actionview (>= 5.0.1.x)
- activesupport (>= 5.0.1.x)
- rails-dom-testing (2.0.3)
- activesupport (>= 4.2.0)
+ rails (7.0.8.1)
+ actioncable (= 7.0.8.1)
+ actionmailbox (= 7.0.8.1)
+ actionmailer (= 7.0.8.1)
+ actionpack (= 7.0.8.1)
+ actiontext (= 7.0.8.1)
+ actionview (= 7.0.8.1)
+ activejob (= 7.0.8.1)
+ activemodel (= 7.0.8.1)
+ activerecord (= 7.0.8.1)
+ activestorage (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
+ bundler (>= 1.15.0)
+ railties (= 7.0.8.1)
+ rails-controller-testing (1.0.5)
+ actionpack (>= 5.0.1.rc1)
+ actionview (>= 5.0.1.rc1)
+ activesupport (>= 5.0.1.rc1)
+ rails-dom-testing (2.2.0)
+ activesupport (>= 5.0.0)
+ minitest
nokogiri (>= 1.6)
- rails-html-sanitizer (1.4.3)
- loofah (~> 2.3)
+ rails-html-sanitizer (1.6.0)
+ loofah (~> 2.21)
+ nokogiri (~> 1.14)
rails-observers (0.1.5)
activemodel (>= 4.0)
rails-perftest (0.0.7)
- railties (5.2.8.1)
- actionpack (= 5.2.8.1)
- activesupport (= 5.2.8.1)
+ railties (7.0.8.1)
+ actionpack (= 7.0.8.1)
+ activesupport (= 7.0.8.1)
method_source
- rake (>= 0.8.7)
- thor (>= 0.19.0, < 2.0)
- rake (13.0.6)
- rb-fsevent (0.10.3)
- rb-inotify (0.9.10)
- ffi (>= 0.5.0, < 2)
- request_store (1.4.1)
+ rake (>= 12.2)
+ thor (~> 1.0)
+ zeitwerk (~> 2.5)
+ rake (13.2.1)
+ rb-fsevent (0.11.2)
+ rb-inotify (0.10.1)
+ ffi (~> 1.0)
+ request_store (1.5.1)
rack (>= 1.4)
- responders (2.4.1)
- actionpack (>= 4.2.0, < 6.0)
- railties (>= 4.2.0, < 6.0)
+ responders (3.1.0)
+ actionpack (>= 5.2)
+ railties (>= 5.2)
retriable (1.4.1)
- ruby-prof (0.15.9)
- safe_yaml (1.0.5)
- signet (0.11.0)
- addressable (~> 2.3)
- faraday (~> 0.9)
+ ruby-prof (1.6.3)
+ ruby2_keywords (0.0.5)
+ signet (0.16.1)
+ addressable (~> 2.8)
+ faraday (>= 0.17.5, < 3.0)
jwt (>= 1.5, < 3.0)
multi_json (~> 1.10)
- simplecov (0.7.1)
- multi_json (~> 1.0)
- simplecov-html (~> 0.7.1)
- simplecov-html (0.7.1)
- simplecov-rcov (0.2.3)
+ simplecov (0.22.0)
+ docile (~> 1.1)
+ simplecov-html (~> 0.11)
+ simplecov_json_formatter (~> 0.1)
+ simplecov-html (0.12.3)
+ simplecov-rcov (0.3.1)
simplecov (>= 0.4.1)
- sprockets (3.7.2)
+ simplecov_json_formatter (0.1.4)
+ sprockets (4.2.1)
concurrent-ruby (~> 1.0)
- rack (> 1, < 3)
+ rack (>= 2.2.4, < 4)
sprockets-rails (3.4.2)
actionpack (>= 5.2)
activesupport (>= 5.2)
sprockets (>= 3.0.0)
- sshkey (2.0.0)
- test-unit (3.3.1)
+ test-unit (3.6.1)
power_assert
- thor (1.2.1)
- thread_safe (0.3.6)
- tzinfo (1.2.10)
- thread_safe (~> 0.1)
- websocket-driver (0.7.5)
+ thor (1.3.1)
+ timeout (0.4.1)
+ tzinfo (2.0.6)
+ concurrent-ruby (~> 1.0)
+ webrick (1.8.1)
+ websocket-driver (0.7.6)
websocket-extensions (>= 0.1.0)
websocket-extensions (0.1.5)
+ zeitwerk (2.6.13)
+ zlib (3.1.0)
PLATFORMS
ruby
@@ -234,14 +289,16 @@ PLATFORMS
DEPENDENCIES
acts_as_api
andand
- arvados (~> 2.1.5)
+ arvados (~> 2.7.0.rc1)
byebug
factory_bot_rails
httpclient
+ i18n
jquery-rails
listen
lograge
logstash-event
+ mini_portile2 (~> 2.8, >= 2.8.1)
minitest (= 5.10.3)
mocha
multi_json
@@ -249,20 +306,18 @@ DEPENDENCIES
optimist
passenger
pg (~> 1.0)
- rails (~> 5.2.0)
+ rails (~> 7.0.0)
rails-controller-testing
rails-observers
rails-perftest
- responders (~> 2.0)
- ruby-prof (~> 0.15.0)
- safe_yaml
- signet (< 0.12)
- simplecov (~> 0.7.1)
+ responders
+ ruby-prof
+ simplecov
simplecov-rcov
- sprockets (~> 3.0)
- sshkey
- test-unit (~> 3.0)
+ sprockets-rails
+ test-unit
themes_for_rails!
+ webrick
BUNDLED WITH
- 2.2.19
+ 2.4.22
diff --git a/services/api/app/assets/config/manifest.js b/services/api/app/assets/config/manifest.js
new file mode 100644
index 0000000000..ceb233892c
--- /dev/null
+++ b/services/api/app/assets/config/manifest.js
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+//= link_tree ../images
+//= link_directory ../javascripts .js
+//= link_directory ../stylesheets .css
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 4625ef654c..b1e2a4008f 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -29,6 +29,9 @@ class ApplicationController < ActionController::Base
include DbCurrentTime
respond_to :json
+
+ # Although CSRF protection is already enabled by default, this is
+ # still needed to reposition CSRF checks later in callback order.
protect_from_forgery
ERROR_ACTIONS = [:render_error, :render_not_found]
@@ -46,7 +49,8 @@ class ApplicationController < ActionController::Base
before_action :load_limit_offset_order_params, only: [:index, :contents]
before_action :load_select_param
before_action(:find_object_by_uuid,
- except: [:index, :create] + ERROR_ACTIONS)
+ except: [:index, :create, :update] + ERROR_ACTIONS)
+ before_action :find_object_for_update, only: [:update]
before_action :load_where_param, only: [:index, :contents]
before_action :load_filters_param, only: [:index, :contents]
before_action :find_objects_for_index, :only => :index
@@ -100,7 +104,7 @@ class ApplicationController < ActionController::Base
end
def show
- send_json @object.as_api_response(nil, select: @select)
+ send_json @object.as_api_response(nil, select: select_for_klass(@select, model_class))
end
def create
@@ -119,7 +123,7 @@ class ApplicationController < ActionController::Base
attrs_to_update = resource_attrs.reject { |k,v|
[:kind, :etag, :href].index k
}
- @object.update_attributes! attrs_to_update
+ @object.update! attrs_to_update
show
end
@@ -227,6 +231,24 @@ class ApplicationController < ActionController::Base
@objects = model_class.apply_filters(@objects, @filters)
end
+ def select_for_klass sel, model_class, raise_unknown=true
+ return nil if sel.nil?
+ # Filter the select fields to only the ones that apply to the
+ # given class.
+ sel.map do |column|
+ sp = column.split(".")
+ if sp.length == 2 && sp[0] == model_class.table_name && model_class.selectable_attributes.include?(sp[1])
+ sp[1]
+ elsif model_class.selectable_attributes.include? column
+ column
+ elsif raise_unknown
+ raise ArgumentError.new("Invalid attribute '#{column}' of #{model_class.name} in select parameter")
+ else
+ nil
+ end
+ end.compact
+ end
+
def apply_where_limit_order_params model_class=nil
model_class ||= self.model_class
apply_filters model_class
@@ -290,7 +312,7 @@ class ApplicationController < ActionController::Base
# Map attribute names in @select to real column names, resolve
# those to fully-qualified SQL column names, and pass the
# resulting string to the select method.
- columns_list = model_class.columns_for_attributes(@select).
+ columns_list = model_class.columns_for_attributes(select_for_klass @select, model_class).
map { |s| "#{ar_table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
@objects = @objects.select(columns_list.join(", "))
end
@@ -316,7 +338,7 @@ class ApplicationController < ActionController::Base
return if @limit == 0 || @limit == 1
model_class ||= self.model_class
limit_columns = model_class.limit_index_columns_read
- limit_columns &= model_class.columns_for_attributes(@select) if @select
+ limit_columns &= model_class.columns_for_attributes(select_for_klass @select, model_class) if @select
return if limit_columns.empty?
model_class.transaction do
limit_query = @objects.
@@ -464,7 +486,11 @@ class ApplicationController < ActionController::Base
controller_name
end
- def find_object_by_uuid
+ def find_object_for_update
+ find_object_by_uuid(with_lock: true)
+ end
+
+ def find_object_by_uuid(with_lock: false)
if params[:id] and params[:id].match(/\D/)
params[:uuid] = params.delete :id
end
@@ -474,8 +500,23 @@ class ApplicationController < ActionController::Base
@orders = []
@filters = []
@objects = nil
+
+ # This is a little hacky but sometimes the fields the user wants
+ # to selecting on are unrelated to the object being loaded here,
+ # for example groups#contents, so filter the fields that will be
+ # used in find_objects_for_index and then reset afterwards. In
+ # some cases, code that modifies the @select list needs to set
+ # @preserve_select.
+ @preserve_select = @select
+ @select = select_for_klass(@select, self.model_class, false)
+
find_objects_for_index
- @object = @objects.first
+ if with_lock && Rails.configuration.API.LockBeforeUpdate
+ @object = @objects.lock.first
+ else
+ @object = @objects.first
+ end
+ @select = @preserve_select
end
def nullable_attributes
diff --git a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
index 22bcb6c1d5..da7e11cd9f 100644
--- a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
+++ b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
@@ -128,7 +128,7 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
super
end
- def find_object_by_uuid
+ def find_object_by_uuid(with_lock: false)
uuid_param = params[:uuid] || params[:id]
if (uuid_param != current_api_client_authorization.andand.uuid &&
!Thread.current[:api_client].andand.is_trusted)
@@ -140,7 +140,11 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
@where = {}
@filters = [['uuid', '=', uuid_param]]
find_objects_for_index
- @object = @objects.first
+ query = @objects
+ if with_lock && Rails.configuration.API.LockBeforeUpdate
+ query = query.lock
+ end
+ @object = query.first
end
def current_api_client_is_trusted
diff --git a/services/api/app/controllers/arvados/v1/collections_controller.rb b/services/api/app/controllers/arvados/v1/collections_controller.rb
index c9b36e19ed..ad1771a87e 100644
--- a/services/api/app/controllers/arvados/v1/collections_controller.rb
+++ b/services/api/app/controllers/arvados/v1/collections_controller.rb
@@ -60,7 +60,7 @@ class Arvados::V1::CollectionsController < ApplicationController
super
end
- def find_object_by_uuid
+ def find_object_by_uuid(with_lock: false)
if loc = Keep::Locator.parse(params[:id])
loc.strip_hints!
@@ -81,7 +81,11 @@ class Arvados::V1::CollectionsController < ApplicationController
# available lifetime.
select_attrs = (@select || ["manifest_text"]) | ["portable_data_hash", "trash_at"]
- if c = Collection.
+ model = Collection
+ if with_lock && Rails.configuration.API.LockBeforeUpdate
+ model = model.lock
+ end
+ if c = model.
readable_by(*@read_users, opts).
where({ portable_data_hash: loc.to_s }).
order("trash_at desc").
@@ -98,7 +102,7 @@ class Arvados::V1::CollectionsController < ApplicationController
end
end
else
- super
+ super(with_lock: with_lock)
end
end
@@ -179,12 +183,12 @@ class Arvados::V1::CollectionsController < ApplicationController
end
end
- Container.readable_by(*@read_users).where(output: loc.to_s).each do |c|
- search_edges(visited, c.uuid, :search_up)
+ Container.readable_by(*@read_users).where(output: loc.to_s).pluck(:uuid).each do |c_uuid|
+ search_edges(visited, c_uuid, :search_up)
end
- Container.readable_by(*@read_users).where(log: loc.to_s).each do |c|
- search_edges(visited, c.uuid, :search_up)
+ Container.readable_by(*@read_users).where(log: loc.to_s).pluck(:uuid).each do |c_uuid|
+ search_edges(visited, c_uuid, :search_up)
end
elsif direction == :search_down
if loc.to_s == "d41d8cd98f00b204e9800998ecf8427e+0"
@@ -203,7 +207,7 @@ class Arvados::V1::CollectionsController < ApplicationController
end
end
- Container.readable_by(*@read_users).where([Container.full_text_trgm + " like ?", "%#{loc.to_s}%"]).each do |c|
+ Container.readable_by(*@read_users).where([Container.full_text_trgm + " like ?", "%#{loc.to_s}%"]).select("output, log, uuid").each do |c|
if c.output != loc.to_s && c.log != loc.to_s
search_edges(visited, c.uuid, :search_down)
end
@@ -272,12 +276,12 @@ class Arvados::V1::CollectionsController < ApplicationController
end
end
- ContainerRequest.readable_by(*@read_users).where(output_uuid: uuid).each do |cr|
- search_edges(visited, cr.uuid, :search_up)
+ ContainerRequest.readable_by(*@read_users).where(output_uuid: uuid).pluck(:uuid).each do |cr_uuid|
+ search_edges(visited, cr_uuid, :search_up)
end
- ContainerRequest.readable_by(*@read_users).where(log_uuid: uuid).each do |cr|
- search_edges(visited, cr.uuid, :search_up)
+ ContainerRequest.readable_by(*@read_users).where(log_uuid: uuid).pluck(:uuid).each do |cr_uuid|
+ search_edges(visited, cr_uuid, :search_up)
end
elsif direction == :search_down
search_edges(visited, c.portable_data_hash, :search_down)
diff --git a/services/api/app/controllers/arvados/v1/container_requests_controller.rb b/services/api/app/controllers/arvados/v1/container_requests_controller.rb
index 07b8098f5b..f99a0a55a9 100644
--- a/services/api/app/controllers/arvados/v1/container_requests_controller.rb
+++ b/services/api/app/controllers/arvados/v1/container_requests_controller.rb
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
+require 'update_priorities'
+
class Arvados::V1::ContainerRequestsController < ApplicationController
accept_attribute_as_json :environment, Hash
accept_attribute_as_json :mounts, Hash
@@ -28,4 +30,36 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
},
})
end
+
+ def self._container_status_requires_parameters
+ (super rescue {}).
+ merge({
+ uuid: {
+ type: 'string', required: true, description: "The UUID of the ContainerRequest in question.",
+ },
+ })
+ end
+
+ # This API is handled entirely by controller, so this method is
+ # never called -- it's only here for the sake of adding the API to
+ # the generated discovery document.
+ def container_status
+ send_json({"errors" => "controller-only API, not handled by rails"}, status: 400)
+ end
+
+ def update
+ if (resource_attrs.keys.map(&:to_sym) - [:owner_uuid, :name, :description, :properties]).empty? or @object.container_uuid.nil?
+ # If no attributes are being updated besides these, there are no
+ # cascading changes to other rows/tables, the only lock will be
+ # the single row lock on SQL UPDATE.
+ super
+ else
+ # Get locks ahead of time to avoid deadlock in cascading priority
+ # update
+ Container.transaction do
+ row_lock_for_priority_update @object.container_uuid
+ super
+ end
+ end
+ end
end
diff --git a/services/api/app/controllers/arvados/v1/containers_controller.rb b/services/api/app/controllers/arvados/v1/containers_controller.rb
index 041f559472..13aa478d26 100644
--- a/services/api/app/controllers/arvados/v1/containers_controller.rb
+++ b/services/api/app/controllers/arvados/v1/containers_controller.rb
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
+require 'update_priorities'
+
class Arvados::V1::ContainersController < ApplicationController
accept_attribute_as_json :environment, Hash
accept_attribute_as_json :mounts, Hash
@@ -29,8 +31,18 @@ class Arvados::V1::ContainersController < ApplicationController
end
def update
- @object.with_lock do
+ if (resource_attrs.keys.map(&:to_sym) - [:cost, :gateway_address, :output_properties, :progress, :runtime_status]).empty?
+ # If no attributes are being updated besides these, there are no
+ # cascading changes to other rows/tables, the only lock will the
+ # single row lock on SQL UPDATE.
super
+ else
+ Container.transaction do
+ # Get locks ahead of time to avoid deadlock in cascading priority
+ # update
+ row_lock_for_priority_update @object.uuid
+ super
+ end
end
end
@@ -39,7 +51,17 @@ class Arvados::V1::ContainersController < ApplicationController
if action_name == 'lock' || action_name == 'unlock'
# Avoid loading more fields than we need
@objects = @objects.select(:id, :uuid, :state, :priority, :auth_uuid, :locked_by_uuid, :lock_count)
- @select = %w(uuid state priority auth_uuid locked_by_uuid)
+ # This gets called from within find_object_by_uuid.
+ # find_object_by_uuid stores the original value of @select in
+ # @preserve_select, edits the value of @select, calls
+ # find_objects_for_index, then restores @select from the value
+ # of @preserve_select. So if we want our updated value of
+ # @select here to stick, we have to set @preserve_select.
+ @select = @preserve_select = %w(uuid state priority auth_uuid locked_by_uuid)
+ elsif action_name == 'update_priority'
+ # We're going to reload in update_priority!, which will select
+ # all attributes, but will fail if we don't select :id now.
+ @objects = @objects.select(:id, :uuid)
end
end
@@ -53,6 +75,11 @@ class Arvados::V1::ContainersController < ApplicationController
show
end
+ def update_priority
+ @object.update_priority!
+ show
+ end
+
def current
if Thread.current[:api_client_authorization].nil?
send_error("Not logged in", status: 401)
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index e9bc006a36..c362cf32d7 100644
--- a/services/api/app/controllers/arvados/v1/groups_controller.rb
+++ b/services/api/app/controllers/arvados/v1/groups_controller.rb
@@ -46,7 +46,6 @@ class Arvados::V1::GroupsController < ApplicationController
type: 'boolean', required: false, default: false, description: 'Include past collection versions.',
}
})
- params.delete(:select)
params
end
@@ -93,7 +92,7 @@ class Arvados::V1::GroupsController < ApplicationController
attrs_to_update = resource_attrs.reject { |k, v|
[:kind, :etag, :href].index k
}.merge({async_permissions_update: true})
- @object.update_attributes!(attrs_to_update)
+ @object.update!(attrs_to_update)
@object.save!
render_accepted
else
@@ -260,6 +259,20 @@ class Arvados::V1::GroupsController < ApplicationController
end
end
+ # Check that any fields in @select are valid for at least one class
+ if @select
+ all_attributes = []
+ klasses.each do |klass|
+ all_attributes.concat klass.selectable_attributes
+ end
+ @select.each do |check|
+ if !all_attributes.include? check
+ raise ArgumentError.new "Invalid attribute '#{check}' in select"
+ end
+ end
+ end
+ any_selections = @select
+
included_by_uuid = {}
seen_last_class = false
@@ -291,14 +304,21 @@ class Arvados::V1::GroupsController < ApplicationController
request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i || r !~ /\./ } ||
klass.default_orders.join(", ")
- @select = nil
+ @select = select_for_klass any_selections, klass, false
+
where_conds = filter_by_owner
- if klass == Collection
+ if klass == Collection && @select.nil?
@select = klass.selectable_attributes - ["manifest_text", "unsigned_manifest_text"]
elsif klass == Group
where_conds = where_conds.merge(group_class: ["project","filter"])
end
+ # Make signed manifest_text not selectable because controller
+ # currently doesn't know to sign it.
+ if @select
+ @select = @select - ["manifest_text"]
+ end
+
@filters = request_filters.map do |col, op, val|
if !col.index('.')
[col, op, val]
diff --git a/services/api/app/controllers/arvados/v1/links_controller.rb b/services/api/app/controllers/arvados/v1/links_controller.rb
index 7716a3d5cf..c956bfc9b4 100644
--- a/services/api/app/controllers/arvados/v1/links_controller.rb
+++ b/services/api/app/controllers/arvados/v1/links_controller.rb
@@ -20,6 +20,42 @@ class Arvados::V1::LinksController < ApplicationController
resource_attrs.delete :head_kind
resource_attrs.delete :tail_kind
+
+ if resource_attrs[:link_class] == 'permission' && Link::PermLevel[resource_attrs[:name]]
+ existing = Link.
+ lock. # select ... for update
+ where(link_class: 'permission',
+ tail_uuid: resource_attrs[:tail_uuid],
+ head_uuid: resource_attrs[:head_uuid],
+ name: Link::PermLevel.keys).first
+ if existing
+ @object = existing
+ if Link::PermLevel[resource_attrs[:name]] > Link::PermLevel[existing.name]
+ # upgrade existing permission link to the requested level.
+ return update
+ else
+ # no-op: existing permission is already greater or equal to
+ # the newly requested permission.
+ return show
+ end
+ end
+ elsif resource_attrs[:link_class] == 'permission' &&
+ resource_attrs[:name] == 'can_login' &&
+ resource_attrs[:properties].respond_to?(:has_key?) &&
+ resource_attrs[:properties].has_key?(:username)
+ existing = Link.
+ lock. # select ... for update
+ where(link_class: 'permission',
+ tail_uuid: resource_attrs[:tail_uuid],
+ head_uuid: resource_attrs[:head_uuid]).
+ where('properties @> ?', SafeJSON.dump({'username' => resource_attrs[:properties][:username]})).
+ first
+ if existing
+ @object = existing
+ return show
+ end
+ end
+
super
end
@@ -38,7 +74,7 @@ class Arvados::V1::LinksController < ApplicationController
protected
- def find_object_by_uuid
+ def find_object_by_uuid(with_lock: false)
if params[:id] && params[:id].match(/\D/)
params[:uuid] = params.delete :id
end
@@ -49,7 +85,7 @@ class Arvados::V1::LinksController < ApplicationController
.where(uuid: params[:uuid])
.first
elsif !current_user
- super
+ super(with_lock: with_lock)
else
# The usual permission-filtering index query is unnecessarily
# inefficient, and doesn't match all permission links that
@@ -57,11 +93,15 @@ class Arvados::V1::LinksController < ApplicationController
# by UUID, then check whether (a) its tail_uuid is the current
# user or (b) its head_uuid is an object the current_user
# can_manage.
- link = Link.unscoped.where(uuid: params[:uuid]).first
+ model = Link
+ if with_lock && Rails.configuration.API.LockBeforeUpdate
+ model = model.lock
+ end
+ link = model.unscoped.where(uuid: params[:uuid]).first
if link && link.link_class != 'permission'
# Not a permission link. Re-fetch using generic
# permission-filtering query.
- super
+ super(with_lock: with_lock)
elsif link && (current_user.uuid == link.tail_uuid ||
current_user.can?(manage: link.head_uuid))
# Permission granted.
diff --git a/services/api/app/controllers/arvados/v1/nodes_controller.rb b/services/api/app/controllers/arvados/v1/nodes_controller.rb
index eb72b7096d..2510fd49fa 100644
--- a/services/api/app/controllers/arvados/v1/nodes_controller.rb
+++ b/services/api/app/controllers/arvados/v1/nodes_controller.rb
@@ -37,7 +37,7 @@ class Arvados::V1::NodesController < ApplicationController
attrs_to_update = resource_attrs.reject { |k,v|
[:kind, :etag, :href].index k
}
- @object.update_attributes!(attrs_to_update)
+ @object.update!(attrs_to_update)
@object.assign_slot if params[:assign_slot]
@object.save!
show
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 0300b75075..74aa4078cb 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -24,213 +24,212 @@ class Arvados::V1::SchemaController < ApplicationController
protected
def discovery_doc
- Rails.cache.fetch 'arvados_v1_rest_discovery' do
- Rails.application.eager_load!
- remoteHosts = {}
- Rails.configuration.RemoteClusters.each {|k,v| if k != :"*" then remoteHosts[k] = v["Host"] end }
- discovery = {
- kind: "discovery#restDescription",
- discoveryVersion: "v1",
- id: "arvados:v1",
- name: "arvados",
- version: "v1",
- # format is YYYYMMDD, must be fixed width (needs to be lexically
- # sortable), updated manually, may be used by clients to
- # determine availability of API server features.
- revision: "20220510",
- source_version: AppVersion.hash,
- sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
- packageVersion: AppVersion.package_version,
- generatedAt: db_current_time.iso8601,
- title: "Arvados API",
- description: "The API to interact with Arvados.",
- documentationLink: "http://doc.arvados.org/api/index.html",
- defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication,
- protocol: "rest",
- baseUrl: root_url + "arvados/v1/",
- basePath: "/arvados/v1/",
- rootUrl: root_url,
- servicePath: "arvados/v1/",
- batchPath: "batch",
- uuidPrefix: Rails.configuration.ClusterID,
- defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime,
- blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,
- maxRequestSize: Rails.configuration.API.MaxRequestSize,
- maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
- dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats.keys,
- crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
- crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
- crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
- crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes,
- crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines,
- crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob,
- crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod,
- crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,
- crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,
- remoteHosts: remoteHosts,
- remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
- websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
- workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
- workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,
- keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
- gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
- parameters: {
- alt: {
+ Rails.application.eager_load!
+ remoteHosts = {}
+ Rails.configuration.RemoteClusters.each {|k,v| if k != :"*" then remoteHosts[k] = v["Host"] end }
+ discovery = {
+ kind: "discovery#restDescription",
+ discoveryVersion: "v1",
+ id: "arvados:v1",
+ name: "arvados",
+ version: "v1",
+ # format is YYYYMMDD, must be fixed width (needs to be lexically
+ # sortable), updated manually, may be used by clients to
+ # determine availability of API server features.
+ revision: "20231117",
+ source_version: AppVersion.hash,
+ sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
+ packageVersion: AppVersion.package_version,
+ generatedAt: db_current_time.iso8601,
+ title: "Arvados API",
+ description: "The API to interact with Arvados.",
+ documentationLink: "http://doc.arvados.org/api/index.html",
+ defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication,
+ protocol: "rest",
+ baseUrl: root_url + "arvados/v1/",
+ basePath: "/arvados/v1/",
+ rootUrl: root_url,
+ servicePath: "arvados/v1/",
+ batchPath: "batch",
+ uuidPrefix: Rails.configuration.ClusterID,
+ defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime,
+ blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,
+ maxRequestSize: Rails.configuration.API.MaxRequestSize,
+ maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
+ dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats.keys,
+ crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
+ crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
+ crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
+ crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes,
+ crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines,
+ crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob,
+ crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod,
+ crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,
+ crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,
+ remoteHosts: remoteHosts,
+ remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
+ websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
+ workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
+ workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,
+ keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
+ gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
+ parameters: {
+ alt: {
+ type: "string",
+ description: "Data format for the response.",
+ default: "json",
+ enum: [
+ "json"
+ ],
+ enumDescriptions: [
+ "Responses with Content-Type of application/json"
+ ],
+ location: "query"
+ },
+ fields: {
+ type: "string",
+ description: "Selector specifying which fields to include in a partial response.",
+ location: "query"
+ },
+ key: {
+ type: "string",
+ description: "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ location: "query"
+ },
+ oauth_token: {
+ type: "string",
+ description: "OAuth 2.0 token for the current user.",
+ location: "query"
+ }
+ },
+ auth: {
+ oauth2: {
+ scopes: {
+ "https://api.arvados.org/auth/arvados" => {
+ description: "View and manage objects"
+ },
+ "https://api.arvados.org/auth/arvados.readonly" => {
+ description: "View objects"
+ }
+ }
+ }
+ },
+ schemas: {},
+ resources: {}
+ }
+
+ ActiveRecord::Base.descendants.reject(&:abstract_class?).sort_by(&:to_s).each do |k|
+ begin
+ ctl_class = "Arvados::V1::#{k.to_s.pluralize}Controller".constantize
+ rescue
+ # No controller -> no discovery.
+ next
+ end
+ object_properties = {}
+ k.columns.
+ select { |col| k.selectable_attributes.include? col.name }.
+ collect do |col|
+ if k.serialized_attributes.has_key? col.name
+ object_properties[col.name] = {
+ type: k.serialized_attributes[col.name].object_class.to_s
+ }
+ elsif k.attribute_types[col.name].is_a? JsonbType::Hash
+ object_properties[col.name] = {
+ type: Hash.to_s
+ }
+ elsif k.attribute_types[col.name].is_a? JsonbType::Array
+ object_properties[col.name] = {
+ type: Array.to_s
+ }
+ else
+ object_properties[col.name] = {
+ type: col.type
+ }
+ end
+ end
+ discovery[:schemas][k.to_s + 'List'] = {
+ id: k.to_s + 'List',
+ description: k.to_s + ' list',
+ type: "object",
+ properties: {
+ kind: {
type: "string",
- description: "Data format for the response.",
- default: "json",
- enum: [
- "json"
- ],
- enumDescriptions: [
- "Responses with Content-Type of application/json"
- ],
- location: "query"
+ description: "Object type. Always arvados##{k.to_s.camelcase(:lower)}List.",
+ default: "arvados##{k.to_s.camelcase(:lower)}List"
+ },
+ etag: {
+ type: "string",
+ description: "List version."
+ },
+ items: {
+ type: "array",
+ description: "The list of #{k.to_s.pluralize}.",
+ items: {
+ "$ref" => k.to_s
+ }
},
- fields: {
+ next_link: {
type: "string",
- description: "Selector specifying which fields to include in a partial response.",
- location: "query"
+ description: "A link to the next page of #{k.to_s.pluralize}."
},
- key: {
+ next_page_token: {
type: "string",
- description: "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- location: "query"
+ description: "The page token for the next page of #{k.to_s.pluralize}."
},
- oauth_token: {
+ selfLink: {
type: "string",
- description: "OAuth 2.0 token for the current user.",
- location: "query"
+ description: "A link back to this list."
}
- },
- auth: {
- oauth2: {
- scopes: {
- "https://api.arvados.org/auth/arvados" => {
- description: "View and manage objects"
- },
- "https://api.arvados.org/auth/arvados.readonly" => {
- description: "View objects"
- }
- }
+ }
+ }
+ discovery[:schemas][k.to_s] = {
+ id: k.to_s,
+ description: k.to_s,
+ type: "object",
+ uuidPrefix: (k.respond_to?(:uuid_prefix) ? k.uuid_prefix : nil),
+ properties: {
+ uuid: {
+ type: "string",
+ description: "Object ID."
+ },
+ etag: {
+ type: "string",
+ description: "Object version."
}
- },
- schemas: {},
- resources: {}
+ }.merge(object_properties)
}
-
- ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
- begin
- ctl_class = "Arvados::V1::#{k.to_s.pluralize}Controller".constantize
- rescue
- # No controller -> no discovery.
- next
- end
- object_properties = {}
- k.columns.
- select { |col| col.name != 'id' && !col.name.start_with?('secret_') }.
- collect do |col|
- if k.serialized_attributes.has_key? col.name
- object_properties[col.name] = {
- type: k.serialized_attributes[col.name].object_class.to_s
- }
- elsif k.attribute_types[col.name].is_a? JsonbType::Hash
- object_properties[col.name] = {
- type: Hash.to_s
- }
- elsif k.attribute_types[col.name].is_a? JsonbType::Array
- object_properties[col.name] = {
- type: Array.to_s
- }
- else
- object_properties[col.name] = {
- type: col.type
- }
- end
- end
- discovery[:schemas][k.to_s + 'List'] = {
- id: k.to_s + 'List',
- description: k.to_s + ' list',
- type: "object",
- properties: {
- kind: {
- type: "string",
- description: "Object type. Always arvados##{k.to_s.camelcase(:lower)}List.",
- default: "arvados##{k.to_s.camelcase(:lower)}List"
- },
- etag: {
- type: "string",
- description: "List version."
- },
- items: {
- type: "array",
- description: "The list of #{k.to_s.pluralize}.",
- items: {
- "$ref" => k.to_s
+ discovery[:resources][k.to_s.underscore.pluralize] = {
+ methods: {
+ get: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.get",
+ path: "#{k.to_s.underscore.pluralize}/{uuid}",
+ httpMethod: "GET",
+ description: "Gets a #{k.to_s}'s metadata by UUID.",
+ parameters: {
+ uuid: {
+ type: "string",
+ description: "The UUID of the #{k.to_s} in question.",
+ required: true,
+ location: "path"
}
},
- next_link: {
- type: "string",
- description: "A link to the next page of #{k.to_s.pluralize}."
- },
- next_page_token: {
- type: "string",
- description: "The page token for the next page of #{k.to_s.pluralize}."
- },
- selfLink: {
- type: "string",
- description: "A link back to this list."
- }
- }
- }
- discovery[:schemas][k.to_s] = {
- id: k.to_s,
- description: k.to_s,
- type: "object",
- uuidPrefix: (k.respond_to?(:uuid_prefix) ? k.uuid_prefix : nil),
- properties: {
- uuid: {
- type: "string",
- description: "Object ID."
- },
- etag: {
- type: "string",
- description: "Object version."
- }
- }.merge(object_properties)
- }
- discovery[:resources][k.to_s.underscore.pluralize] = {
- methods: {
- get: {
- id: "arvados.#{k.to_s.underscore.pluralize}.get",
- path: "#{k.to_s.underscore.pluralize}/{uuid}",
- httpMethod: "GET",
- description: "Gets a #{k.to_s}'s metadata by UUID.",
- parameters: {
- uuid: {
- type: "string",
- description: "The UUID of the #{k.to_s} in question.",
- required: true,
- location: "path"
- }
- },
- parameterOrder: [
- "uuid"
- ],
- response: {
- "$ref" => k.to_s
- },
- scopes: [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
+ parameterOrder: [
+ "uuid"
+ ],
+ response: {
+ "$ref" => k.to_s
},
- index: {
- id: "arvados.#{k.to_s.underscore.pluralize}.index",
- path: k.to_s.underscore.pluralize,
- httpMethod: "GET",
- description:
- %|Index #{k.to_s.pluralize}.
+ scopes: [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ index: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.index",
+ path: k.to_s.underscore.pluralize,
+ httpMethod: "GET",
+ description:
+ %|Index #{k.to_s.pluralize}.
The index
method returns a
resource list of
@@ -251,243 +250,242 @@ class Arvados::V1::SchemaController < ApplicationController
"request_time":0.157236317
}
|,
- parameters: {
- },
- response: {
- "$ref" => "#{k.to_s}List"
- },
- scopes: [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
+ parameters: {
},
- create: {
- id: "arvados.#{k.to_s.underscore.pluralize}.create",
- path: "#{k.to_s.underscore.pluralize}",
- httpMethod: "POST",
- description: "Create a new #{k.to_s}.",
- parameters: {},
- request: {
- required: true,
- properties: {
- k.to_s.underscore => {
- "$ref" => k.to_s
- }
- }
- },
- response: {
- "$ref" => k.to_s
- },
- scopes: [
- "https://api.arvados.org/auth/arvados"
- ]
+ response: {
+ "$ref" => "#{k.to_s}List"
},
- update: {
- id: "arvados.#{k.to_s.underscore.pluralize}.update",
- path: "#{k.to_s.underscore.pluralize}/{uuid}",
- httpMethod: "PUT",
- description: "Update attributes of an existing #{k.to_s}.",
- parameters: {
- uuid: {
- type: "string",
- description: "The UUID of the #{k.to_s} in question.",
- required: true,
- location: "path"
+ scopes: [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
+ create: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.create",
+ path: "#{k.to_s.underscore.pluralize}",
+ httpMethod: "POST",
+ description: "Create a new #{k.to_s}.",
+ parameters: {},
+ request: {
+ required: true,
+ properties: {
+ k.to_s.underscore => {
+ "$ref" => k.to_s
}
- },
- request: {
+ }
+ },
+ response: {
+ "$ref" => k.to_s
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ update: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.update",
+ path: "#{k.to_s.underscore.pluralize}/{uuid}",
+ httpMethod: "PUT",
+ description: "Update attributes of an existing #{k.to_s}.",
+ parameters: {
+ uuid: {
+ type: "string",
+ description: "The UUID of the #{k.to_s} in question.",
required: true,
- properties: {
- k.to_s.underscore => {
- "$ref" => k.to_s
- }
+ location: "path"
+ }
+ },
+ request: {
+ required: true,
+ properties: {
+ k.to_s.underscore => {
+ "$ref" => k.to_s
}
- },
+ }
+ },
+ response: {
+ "$ref" => k.to_s
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ },
+ delete: {
+ id: "arvados.#{k.to_s.underscore.pluralize}.delete",
+ path: "#{k.to_s.underscore.pluralize}/{uuid}",
+ httpMethod: "DELETE",
+ description: "Delete an existing #{k.to_s}.",
+ parameters: {
+ uuid: {
+ type: "string",
+ description: "The UUID of the #{k.to_s} in question.",
+ required: true,
+ location: "path"
+ }
+ },
+ response: {
+ "$ref" => k.to_s
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ }
+ }
+ # Check for Rails routes that don't match the usual actions
+ # listed above
+ d_methods = discovery[:resources][k.to_s.underscore.pluralize][:methods]
+ Rails.application.routes.routes.each do |route|
+ action = route.defaults[:action]
+ httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|
+ method if route.verb.match(method)
+ }.compact.first
+ if httpMethod and
+ route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
+ ctl_class.action_methods.include? action
+ if !d_methods[action.to_sym]
+ method = {
+ id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
+ path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
+ httpMethod: httpMethod,
+ description: "#{action} #{k.to_s.underscore.pluralize}",
+ parameters: {},
response: {
- "$ref" => k.to_s
+ "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
},
scopes: [
- "https://api.arvados.org/auth/arvados"
- ]
- },
- delete: {
- id: "arvados.#{k.to_s.underscore.pluralize}.delete",
- path: "#{k.to_s.underscore.pluralize}/{uuid}",
- httpMethod: "DELETE",
- description: "Delete an existing #{k.to_s}.",
- parameters: {
- uuid: {
+ "https://api.arvados.org/auth/arvados"
+ ]
+ }
+ route.segment_keys.each do |key|
+ if key != :format
+ key = :uuid if key == :id
+ method[:parameters][key] = {
type: "string",
- description: "The UUID of the #{k.to_s} in question.",
+ description: "",
required: true,
location: "path"
}
- },
- response: {
- "$ref" => k.to_s
- },
- scopes: [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- }
- }
- # Check for Rails routes that don't match the usual actions
- # listed above
- d_methods = discovery[:resources][k.to_s.underscore.pluralize][:methods]
- Rails.application.routes.routes.each do |route|
- action = route.defaults[:action]
- httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|
- method if route.verb.match(method)
- }.compact.first
- if httpMethod and
- route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
- ctl_class.action_methods.include? action
- if !d_methods[action.to_sym]
- method = {
- id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
- path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
- httpMethod: httpMethod,
- description: "#{action} #{k.to_s.underscore.pluralize}",
- parameters: {},
- response: {
- "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
- },
- scopes: [
- "https://api.arvados.org/auth/arvados"
- ]
- }
- route.segment_keys.each do |key|
- if key != :format
- key = :uuid if key == :id
- method[:parameters][key] = {
- type: "string",
- description: "",
- required: true,
- location: "path"
- }
- end
end
- else
- # We already built a generic method description, but we
- # might find some more required parameters through
- # introspection.
- method = d_methods[action.to_sym]
end
- if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
- ctl_class.send("_#{action}_requires_parameters".to_sym).each do |l, v|
- if v.is_a? Hash
- method[:parameters][l] = v
- else
- method[:parameters][l] = {}
- end
- if !method[:parameters][l][:default].nil?
- # The JAVA SDK is sensitive to all values being strings
- method[:parameters][l][:default] = method[:parameters][l][:default].to_s
- end
- method[:parameters][l][:type] ||= 'string'
- method[:parameters][l][:description] ||= ''
- method[:parameters][l][:location] = (route.segment_keys.include?(l) ? 'path' : 'query')
- if method[:parameters][l][:required].nil?
- method[:parameters][l][:required] = v != false
- end
+ else
+ # We already built a generic method description, but we
+ # might find some more required parameters through
+ # introspection.
+ method = d_methods[action.to_sym]
+ end
+ if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
+ ctl_class.send("_#{action}_requires_parameters".to_sym).each do |l, v|
+ if v.is_a? Hash
+ method[:parameters][l] = v
+ else
+ method[:parameters][l] = {}
+ end
+ if !method[:parameters][l][:default].nil?
+ # The JAVA SDK is sensitive to all values being strings
+ method[:parameters][l][:default] = method[:parameters][l][:default].to_s
+ end
+ method[:parameters][l][:type] ||= 'string'
+ method[:parameters][l][:description] ||= ''
+ method[:parameters][l][:location] = (route.segment_keys.include?(l) ? 'path' : 'query')
+ if method[:parameters][l][:required].nil?
+ method[:parameters][l][:required] = v != false
end
end
- d_methods[action.to_sym] = method
+ end
+ d_methods[action.to_sym] = method
- if action == 'index'
- list_method = method.dup
- list_method[:id].sub!('index', 'list')
- list_method[:description].sub!('Index', 'List')
- list_method[:description].sub!('index', 'list')
- d_methods[:list] = list_method
- end
+ if action == 'index'
+ list_method = method.dup
+ list_method[:id].sub!('index', 'list')
+ list_method[:description].sub!('Index', 'List')
+ list_method[:description].sub!('index', 'list')
+ d_methods[:list] = list_method
end
end
end
+ end
- # The 'replace_files' option is implemented in lib/controller,
- # not Rails -- we just need to add it here so discovery-aware
- # clients know how to validate it.
- [:create, :update].each do |action|
- discovery[:resources]['collections'][:methods][action][:parameters]['replace_files'] = {
- type: 'object',
- description: 'Files and directories to initialize/replace with content from other collections.',
- required: false,
- location: 'query',
- properties: {},
- additionalProperties: {type: 'string'},
- }
- end
+ # The 'replace_files' option is implemented in lib/controller,
+ # not Rails -- we just need to add it here so discovery-aware
+ # clients know how to validate it.
+ [:create, :update].each do |action|
+ discovery[:resources]['collections'][:methods][action][:parameters]['replace_files'] = {
+ type: 'object',
+ description: 'Files and directories to initialize/replace with content from other collections.',
+ required: false,
+ location: 'query',
+ properties: {},
+ additionalProperties: {type: 'string'},
+ }
+ end
- discovery[:resources]['configs'] = {
- methods: {
- get: {
- id: "arvados.configs.get",
- path: "config",
- httpMethod: "GET",
- description: "Get public config",
- parameters: {
- },
- parameterOrder: [
- ],
- response: {
- },
- scopes: [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
+ discovery[:resources]['configs'] = {
+ methods: {
+ get: {
+ id: "arvados.configs.get",
+ path: "config",
+ httpMethod: "GET",
+ description: "Get public config",
+ parameters: {
},
- }
+ parameterOrder: [
+ ],
+ response: {
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
}
+ }
- discovery[:resources]['vocabularies'] = {
- methods: {
- get: {
- id: "arvados.vocabularies.get",
- path: "vocabulary",
- httpMethod: "GET",
- description: "Get vocabulary definition",
- parameters: {
- },
- parameterOrder: [
- ],
- response: {
- },
- scopes: [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
+ discovery[:resources]['vocabularies'] = {
+ methods: {
+ get: {
+ id: "arvados.vocabularies.get",
+ path: "vocabulary",
+ httpMethod: "GET",
+ description: "Get vocabulary definition",
+ parameters: {
},
- }
+ parameterOrder: [
+ ],
+ response: {
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
}
+ }
- discovery[:resources]['sys'] = {
- methods: {
- get: {
- id: "arvados.sys.trash_sweep",
- path: "sys/trash_sweep",
- httpMethod: "POST",
- description: "apply scheduled trash and delete operations",
- parameters: {
- },
- parameterOrder: [
- ],
- response: {
- },
- scopes: [
- "https://api.arvados.org/auth/arvados",
- "https://api.arvados.org/auth/arvados.readonly"
- ]
+ discovery[:resources]['sys'] = {
+ methods: {
+ get: {
+ id: "arvados.sys.trash_sweep",
+ path: "sys/trash_sweep",
+ httpMethod: "POST",
+ description: "apply scheduled trash and delete operations",
+ parameters: {
},
- }
+ parameterOrder: [
+ ],
+ response: {
+ },
+ scopes: [
+ "https://api.arvados.org/auth/arvados",
+ "https://api.arvados.org/auth/arvados.readonly"
+ ]
+ },
}
+ }
- Rails.configuration.API.DisabledAPIs.each do |method, _|
- ctrl, action = method.to_s.split('.', 2)
- discovery[:resources][ctrl][:methods].delete(action.to_sym)
- end
- discovery
+ Rails.configuration.API.DisabledAPIs.each do |method, _|
+ ctrl, action = method.to_s.split('.', 2)
+ discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
+ discovery
end
end
diff --git a/services/api/app/controllers/arvados/v1/users_controller.rb b/services/api/app/controllers/arvados/v1/users_controller.rb
index 507cb4ac33..031dd2e4f9 100644
--- a/services/api/app/controllers/arvados/v1/users_controller.rb
+++ b/services/api/app/controllers/arvados/v1/users_controller.rb
@@ -16,37 +16,13 @@ class Arvados::V1::UsersController < ApplicationController
# records from LoginCluster.
def batch_update
@objects = []
- params[:updates].andand.each do |uuid, attrs|
- begin
- u = User.find_or_create_by(uuid: uuid)
- rescue ActiveRecord::RecordNotUnique
- retry
- end
- needupdate = {}
- nullify_attrs(attrs).each do |k,v|
- if !v.nil? && u.send(k) != v
- needupdate[k] = v
- end
- end
- if needupdate.length > 0
- begin
- u.update_attributes!(needupdate)
- rescue ActiveRecord::RecordInvalid
- loginCluster = Rails.configuration.Login.LoginCluster
- if u.uuid[0..4] == loginCluster && !needupdate[:username].nil?
- local_user = User.find_by_username(needupdate[:username])
- # A cached user record from the LoginCluster is stale, reset its username
- # and retry the update operation.
- if local_user.andand.uuid[0..4] == loginCluster && local_user.uuid != u.uuid
- new_username = "#{needupdate[:username]}conflict#{rand(99999999)}"
- Rails.logger.warn("cached username '#{needupdate[:username]}' collision with user '#{local_user.uuid}' - renaming to '#{new_username}' before retrying")
- local_user.update_attributes!({username: new_username})
- retry
- end
- end
- raise # Not the issue we're handling above
- end
- end
+ # update_remote_user takes a row lock on the User record, so sort
+ # the keys so we always lock them in the same order.
+ sorted = params[:updates].keys.sort
+ sorted.each do |uuid|
+ attrs = params[:updates][uuid]
+ attrs[:uuid] = uuid
+ u = User.update_remote_user nullify_attrs(attrs)
@objects << u
end
@offset = 0
@@ -103,7 +79,7 @@ class Arvados::V1::UsersController < ApplicationController
collect(&:head_uuid)
todo_uuids = required_uuids - signed_uuids
if todo_uuids.empty?
- @object.update_attributes is_active: true
+ @object.update is_active: true
logger.info "User #{@object.uuid} activated"
else
logger.warn "User #{@object.uuid} called users.activate " +
@@ -274,7 +250,7 @@ class Arvados::V1::UsersController < ApplicationController
return super if @read_users.any?(&:is_admin)
if params[:uuid] != current_user.andand.uuid
# Non-admin index/show returns very basic information about readable users.
- safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name", "username", "can_write", "can_manage"]
+ safe_attrs = ["uuid", "is_active", "is_admin", "is_invited", "email", "first_name", "last_name", "username", "can_write", "can_manage", "kind"]
if @select
@select = @select & safe_attrs
else
@@ -282,6 +258,13 @@ class Arvados::V1::UsersController < ApplicationController
end
@filters += [['is_active', '=', true]]
end
+ # This gets called from within find_object_by_uuid.
+ # find_object_by_uuid stores the original value of @select in
+ # @preserve_select, edits the value of @select, calls
+ # find_objects_for_index, then restores @select from the value
+ # of @preserve_select. So if we want our updated value of
+ # @select here to stick, we have to set @preserve_select.
+ @preserve_select = @select
super
end
diff --git a/services/api/app/controllers/database_controller.rb b/services/api/app/controllers/database_controller.rb
index 69453959d2..8e61d16fa8 100644
--- a/services/api/app/controllers/database_controller.rb
+++ b/services/api/app/controllers/database_controller.rb
@@ -18,10 +18,10 @@ class DatabaseController < ApplicationController
user_uuids = User.
where('email is null or (email not like ? and email not like ?)', '%@example.com', '%.example.com').
collect(&:uuid)
- fixture_uuids =
- YAML::load_file(File.expand_path('../../../test/fixtures/users.yml',
- __FILE__)).
- values.collect { |u| u['uuid'] }
+ fnm = File.expand_path('../../../test/fixtures/users.yml', __FILE__)
+ fixture_uuids = File.open(fnm) do |f|
+ YAML.safe_load(f, filename: fnm, permitted_classes: [Time]).values.collect { |u| u['uuid'] }
+ end
unexpected_uuids = user_uuids - fixture_uuids
if unexpected_uuids.any?
logger.error("Running in test environment, but non-fixture users exist: " +
@@ -61,7 +61,7 @@ class DatabaseController < ApplicationController
ActiveRecord::FixtureSet.
create_fixtures(Rails.root.join('test', 'fixtures'), fixturesets)
- # Dump cache of permissions etc.
+ # Reset cache and global state
Rails.cache.clear
ActiveRecord::Base.connection.clear_query_cache
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index 4b2b985e02..b7693f3420 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -13,7 +13,7 @@ class StaticController < ApplicationController
respond_to do |f|
f.html do
if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.empty?
- redirect_to Rails.configuration.Services.Workbench1.ExternalURL.to_s
+ redirect_to Rails.configuration.Services.Workbench1.ExternalURL.to_s, allow_other_host: true
else
render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
end
diff --git a/services/api/app/controllers/sys_controller.rb b/services/api/app/controllers/sys_controller.rb
index a67b124bd0..7d20cf77fd 100644
--- a/services/api/app/controllers/sys_controller.rb
+++ b/services/api/app/controllers/sys_controller.rb
@@ -12,9 +12,11 @@ class SysController < ApplicationController
# Sweep trashed collections
Collection.
where('delete_at is not null and delete_at < statement_timestamp()').
+ in_batches(of: 15).
destroy_all
Collection.
where('is_trashed = false and trash_at < statement_timestamp()').
+ in_batches(of: 15).
update_all('is_trashed = true')
# Sweep trashed projects and their contents (as well as role
@@ -50,7 +52,7 @@ class SysController < ApplicationController
skipped_classes = ['Group', 'User']
ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
- klass.where({owner_uuid: p_uuid}).destroy_all
+ klass.where({owner_uuid: p_uuid}).in_batches(of: 15).destroy_all
end
end
# Finally delete the project itself
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index ae34fa7600..0c67c9c9d8 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -11,24 +11,33 @@ class UserSessionsController < ApplicationController
respond_to :html
+ def login
+ return send_error "Legacy code path no longer supported", status: 404
+ end
+
+ def logout
+ return send_error "Legacy code path no longer supported", status: 404
+ end
+
# create a new session
def create
- if !Rails.configuration.Login.LoginCluster.empty? and Rails.configuration.Login.LoginCluster != Rails.configuration.ClusterID
- raise "Local login disabled when LoginCluster is set"
- end
-
- max_expires_at = nil
- if params[:provider] == 'controller'
- if request.headers['Authorization'] != 'Bearer ' + Rails.configuration.SystemRootToken
- return send_error('Invalid authorization header', status: 401)
- end
- # arvados-controller verified the user and is passing auth_info
- # in request params.
- authinfo = SafeJSON.load(params[:auth_info])
- max_expires_at = authinfo["expires_at"]
- else
+ remote, return_to_url = params[:return_to].split(',', 2)
+ if params[:provider] != 'controller' ||
+ return_to_url != 'https://controller.api.client.invalid'
return send_error "Legacy code path no longer supported", status: 404
end
+ if request.headers['Authorization'] != 'Bearer ' + Rails.configuration.SystemRootToken
+ return send_error('Invalid authorization header', status: 401)
+ end
+ if remote == ''
+ remote = nil
+ elsif remote !~ /^[0-9a-z]{5}$/
+ return send_error 'Invalid remote cluster id', status: 400
+ end
+ # arvados-controller verified the user and is passing auth_info
+ # in request params.
+ authinfo = SafeJSON.load(params[:auth_info])
+ max_expires_at = authinfo["expires_at"]
if !authinfo['user_uuid'].blank?
user = User.find_by_uuid(authinfo['user_uuid'])
@@ -49,40 +58,13 @@ class UserSessionsController < ApplicationController
# For the benefit of functional and integration tests:
@user = user
- if user.uuid[0..4] != Rails.configuration.ClusterID
- # Actually a remote user
- # Send them to their home cluster's login
- rh = Rails.configuration.RemoteClusters[user.uuid[0..4]]
- remote, return_to_url = params[:return_to].split(',', 2)
- @remotehomeurl = "#{rh.Scheme || "https"}://#{rh.Host}/login?remote=#{Rails.configuration.ClusterID}&return_to=#{return_to_url}"
- render
- return
- end
-
# prevent ArvadosModel#before_create and _update from throwing
# "unauthorized":
Thread.current[:user] = user
user.save or raise Exception.new(user.errors.messages)
- # Give the authenticated user a cookie for direct API access
- session[:user_id] = user.id
- session[:api_client_uuid] = nil
- session[:api_client_trusted] = true # full permission to see user's secrets
-
- @redirect_to = root_path
- if params.has_key?(:return_to)
- # return_to param's format is 'remote,return_to_url'. This comes from login()
- # encoding the remote=zbbbb parameter passed by a client asking for a salted
- # token.
- remote, return_to_url = params[:return_to].split(',', 2)
- if remote !~ /^[0-9a-z]{5}$/ && remote != ""
- return send_error 'Invalid remote cluster id', status: 400
- end
- remote = nil if remote == ''
- return send_api_token_to(return_to_url, user, remote, max_expires_at)
- end
- redirect_to @redirect_to
+ return send_api_token_to(return_to_url, user, remote, max_expires_at)
end
# Omniauth failure callback
@@ -90,52 +72,6 @@ class UserSessionsController < ApplicationController
flash[:notice] = params[:message]
end
- # logout - this gets intercepted by controller, so this is probably
- # mostly dead code at this point.
- def logout
- session[:user_id] = nil
-
- flash[:notice] = 'You have logged off'
- return_to = params[:return_to] || root_url
- redirect_to return_to
- end
-
- # login. Redirect to LoginCluster.
- def login
- if params[:remote] !~ /^[0-9a-z]{5}$/ && !params[:remote].nil?
- return send_error 'Invalid remote cluster id', status: 400
- end
- if current_user and params[:return_to]
- # Already logged in; just need to send a token to the requesting
- # API client.
- #
- # FIXME: if current_user has never authorized this app before,
- # ask for confirmation here!
-
- return send_api_token_to(params[:return_to], current_user, params[:remote])
- end
- p = []
- p << "auth_provider=#{CGI.escape(params[:auth_provider])}" if params[:auth_provider]
-
- if !Rails.configuration.Login.LoginCluster.empty? and Rails.configuration.Login.LoginCluster != Rails.configuration.ClusterID
- host = ApiClientAuthorization.remote_host(uuid_prefix: Rails.configuration.Login.LoginCluster)
- if not host
- raise "LoginCluster #{Rails.configuration.Login.LoginCluster} missing from RemoteClusters"
- end
- scheme = "https"
- cluster = Rails.configuration.RemoteClusters[Rails.configuration.Login.LoginCluster]
- if cluster and cluster['Scheme'] and !cluster['Scheme'].empty?
- scheme = cluster['Scheme']
- end
- login_cluster = "#{scheme}://#{host}"
- p << "remote=#{CGI.escape(params[:remote])}" if params[:remote]
- p << "return_to=#{CGI.escape(params[:return_to])}" if params[:return_to]
- redirect_to "#{login_cluster}/login?#{p.join('&')}"
- else
- return send_error "Legacy code path no longer supported", status: 404
- end
- end
-
def send_api_token_to(callback_url, user, remote=nil, token_expiration=nil)
# Give the API client a token for making API calls on behalf of
# the authenticated user
@@ -173,7 +109,7 @@ class UserSessionsController < ApplicationController
token = @api_client_auth.salted_token(remote: remote)
end
callback_url += 'api_token=' + token
- redirect_to callback_url
+ redirect_to callback_url, allow_other_host: true
end
def cross_origin_forbidden
diff --git a/services/api/app/middlewares/arvados_api_token.rb b/services/api/app/middlewares/arvados_api_token.rb
index 2c240984c6..18140e57fe 100644
--- a/services/api/app/middlewares/arvados_api_token.rb
+++ b/services/api/app/middlewares/arvados_api_token.rb
@@ -42,18 +42,35 @@ class ArvadosApiToken
# reader_tokens.
accepted = false
auth = nil
+ remote_errcodes = []
+ remote_errmsgs = []
[params["api_token"],
params["oauth_token"],
env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([!-~]+)/).andand[2],
*reader_tokens,
].each do |supplied|
next if !supplied
- try_auth = ApiClientAuthorization.
- validate(token: supplied, remote: remote)
- if try_auth.andand.user
- auth = try_auth
- accepted = supplied
- break
+ begin
+ try_auth = ApiClientAuthorization.validate(token: supplied, remote: remote)
+ rescue => e
+ begin
+ remote_errcodes.append(e.http_status)
+ rescue NoMethodError
+ # The exception is an internal validation problem, not a remote error.
+ next
+ end
+ begin
+ errors = SafeJSON.load(e.res.content)["errors"]
+ rescue
+ errors = nil
+ end
+ remote_errmsgs += errors if errors.is_a?(Array)
+ else
+ if try_auth.andand.user
+ auth = try_auth
+ accepted = supplied
+ break
+ end
end
end
@@ -64,6 +81,24 @@ class ArvadosApiToken
Thread.current[:token] = accepted
Thread.current[:user] = auth.andand.user
- @app.call env if @app
+ if auth.nil? and not remote_errcodes.empty?
+ # If we failed to validate any tokens because of remote validation
+ # errors, pass those on to the client. This code is functionally very
+ # similar to ApplicationController#render_error, but the implementation
+ # is very different because we're a Rack middleware, not in
+ # ActionDispatch land yet.
+ remote_errmsgs.prepend("failed to validate remote token")
+ error_content = {
+ error_token: "%d+%08x" % [Time.now.utc.to_i, rand(16 ** 8)],
+ errors: remote_errmsgs,
+ }
+ [
+ remote_errcodes.max,
+ {"Content-Type": "application/json"},
+ SafeJSON.dump(error_content).html_safe,
+ ]
+ else
+ @app.call env if @app
+ end
end
end
diff --git a/services/api/app/models/api_client.rb b/services/api/app/models/api_client.rb
index c914051a34..791b971680 100644
--- a/services/api/app/models/api_client.rb
+++ b/services/api/app/models/api_client.rb
@@ -32,7 +32,13 @@ class ApiClient < ArvadosModel
end
Rails.configuration.Login.TrustedClients.keys.each do |url|
- if norm_url_prefix == norm(url)
+ trusted = norm(url)
+ if norm_url_prefix == trusted
+ return true
+ end
+ if trusted.host.to_s.starts_with?("*.") &&
+ norm_url_prefix.to_s.starts_with?(trusted.scheme + "://") &&
+ norm_url_prefix.to_s.ends_with?(trusted.to_s[trusted.scheme.length + 4...])
return true
end
end
@@ -43,13 +49,14 @@ class ApiClient < ArvadosModel
def norm url
# normalize URL for comparison
url = URI(url.to_s)
- if url.scheme == "https"
- url.port == "443"
- end
- if url.scheme == "http"
- url.port == "80"
+ if url.scheme == "https" && url.port == ""
+ url.port = "443"
+ elsif url.scheme == "http" && url.port == ""
+ url.port = "80"
end
url.path = "/"
+ url.query = nil
+ url.fragment = nil
url
end
end
diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
index 52922d32b1..8311278676 100644
--- a/services/api/app/models/api_client_authorization.rb
+++ b/services/api/app/models/api_client_authorization.rb
@@ -6,11 +6,12 @@ class ApiClientAuthorization < ArvadosModel
include HasUuid
include KindAndEtag
include CommonApiTemplate
+ include Rails.application.routes.url_helpers
extend CurrentApiClient
extend DbCurrentTime
- belongs_to :api_client
- belongs_to :user
+ belongs_to :api_client, optional: true
+ belongs_to :user, optional: true
after_initialize :assign_random_api_token
serialize :scopes, Array
@@ -78,7 +79,9 @@ class ApiClientAuthorization < ArvadosModel
def scopes_allow_request?(request)
method = request.request_method
- if method == 'HEAD'
+ if method == 'GET' and request.path == url_for(controller: 'arvados/v1/api_client_authorizations', action: 'current', only_path: true)
+ true
+ elsif method == 'HEAD'
(scopes_allow?(['HEAD', request.path].join(' ')) ||
scopes_allow?(['GET', request.path].join(' ')))
else
@@ -271,136 +274,110 @@ class ApiClientAuthorization < ArvadosModel
Rails.logger.warn "remote authentication rejected: no host for #{upstream_cluster_id.inspect}"
return nil
end
+ remote_url = URI::parse("https://#{host}/")
+ remote_query = {"remote" => Rails.configuration.ClusterID}
+ remote_headers = {"Authorization" => "Bearer #{token}"}
- begin
- remote_user = SafeJSON.load(
- clnt.get_content('https://' + host + '/arvados/v1/users/current',
- {'remote' => Rails.configuration.ClusterID},
- {'Authorization' => 'Bearer ' + token}))
- rescue => e
- Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
- return nil
- end
-
- # Check the response is well formed.
- if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
- Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
- return nil
- end
-
- remote_user_prefix = remote_user['uuid'][0..4]
-
- # Get token scope, and make sure we use the same UUID as the
- # remote when caching the token.
+ # First get the current token. This query is not limited by token scopes,
+ # and tells us the user's UUID via owner_uuid, so this gives us enough
+ # information to load a local user record from the database if one exists.
remote_token = nil
begin
remote_token = SafeJSON.load(
- clnt.get_content('https://' + host + '/arvados/v1/api_client_authorizations/current',
- {'remote' => Rails.configuration.ClusterID},
- {'Authorization' => 'Bearer ' + token}))
+ clnt.get_content(
+ remote_url.merge("arvados/v1/api_client_authorizations/current"),
+ remote_query, remote_headers,
+ ))
Rails.logger.debug "retrieved remote token #{remote_token.inspect}"
token_uuid = remote_token['uuid']
if !token_uuid.match(HasUuid::UUID_REGEX) || token_uuid[0..4] != upstream_cluster_id
raise "remote cluster #{upstream_cluster_id} returned invalid token uuid #{token_uuid.inspect}"
end
rescue HTTPClient::BadResponseError => e
- if e.res.status != 401
- raise
+ if e.res.status_code >= 400 && e.res.status_code < 500
+ # Remote cluster does not accept this token.
+ return nil
end
- rev = SafeJSON.load(clnt.get_content('https://' + host + '/discovery/v1/apis/arvados/v1/rest'))['revision']
- if rev >= '20010101' && rev < '20210503'
- Rails.logger.warn "remote cluster #{upstream_cluster_id} at #{host} with api rev #{rev} does not provide token expiry and scopes; using scopes=['all']"
- else
- # remote server is new enough that it should have accepted
- # this request if the token was valid
- raise
+ # CurrentApiToken#call and ApplicationController#render_error will
+ # propagate the status code from the #http_status method, so define
+ # that here.
+ def e.http_status
+ self.res.status_code
end
+ raise
+ # TODO #20927: Catch network exceptions and assign a 5xx status to them so
+ # the client knows they're a temporary problem.
rescue => e
Rails.logger.warn "error getting remote token details for #{token.inspect}: #{e}"
return nil
end
- # Clusters can only authenticate for their own users.
- if remote_user_prefix != upstream_cluster_id
- Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}"
+ # Next, load the token's user record from the database (might be nil).
+ remote_user_prefix, remote_user_suffix = remote_token['owner_uuid'].split('-', 2)
+ if anonymous_user_uuid.end_with?(remote_user_suffix)
+ # Special case: map the remote anonymous user to local anonymous user
+ remote_user_uuid = anonymous_user_uuid
+ else
+ remote_user_uuid = remote_token['owner_uuid']
+ end
+ user = User.find_by_uuid(remote_user_uuid)
+
+ # Next, try to load the remote user. If this succeeds, we'll use this
+ # information to update/create the local database record as necessary.
+ # If this fails for any reason, but we successfully loaded a user record
+ # from the database, we'll just rely on that information.
+ remote_user = nil
+ begin
+ remote_user = SafeJSON.load(
+ clnt.get_content(
+ remote_url.merge("arvados/v1/users/current"),
+ remote_query, remote_headers,
+ ))
+ rescue HTTPClient::BadResponseError => e
+ # If user is defined, we will use that alone for auth, see below.
+ if user.nil?
+ # See rationale in the previous BadResponseError rescue.
+ def e.http_status
+ self.res.status_code
+ end
+ raise
+ end
+ # TODO #20927: Catch network exceptions and assign a 5xx status to them so
+ # the client knows they're a temporary problem.
+ rescue => e
+ Rails.logger.warn "getting remote user with token #{token.inspect} failed: #{e}"
+ else
+ # Check the response is well formed.
+ if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
+ Rails.logger.warn "malformed remote user=#{remote_user.inspect}"
+ remote_user = nil
+ # Clusters can only authenticate for their own users.
+ elsif remote_user_prefix != upstream_cluster_id
+ Rails.logger.warn "remote user rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}"
+ remote_user = nil
+ # Force our local copy of a remote root to have a static name
+ elsif system_user_uuid.end_with?(remote_user_suffix)
+ remote_user.update(
+ "first_name" => "root",
+ "last_name" => "from cluster #{remote_user_prefix}",
+ )
+ end
+ end
+
+ if user.nil? and remote_user.nil?
+ Rails.logger.warn "remote token #{token.inspect} rejected: cannot get owner #{remote_user_uuid} from database or remote cluster"
return nil
end
# Invariant: remote_user_prefix == upstream_cluster_id
# therefore: remote_user_prefix != Rails.configuration.ClusterID
-
# Add or update user and token in local database so we can
# validate subsequent requests faster.
- if remote_user['uuid'][-22..-1] == '-tpzed-anonymouspublic'
- # Special case: map the remote anonymous user to local anonymous user
- remote_user['uuid'] = anonymous_user_uuid
- end
-
- user = User.find_by_uuid(remote_user['uuid'])
-
- if !user
- # Create a new record for this user.
- user = User.new(uuid: remote_user['uuid'],
- is_active: false,
- is_admin: false,
- email: remote_user['email'],
- owner_uuid: system_user_uuid)
- user.set_initial_username(requested: remote_user['username'])
- end
-
- # Sync user record.
act_as_system_user do
- %w[first_name last_name email prefs].each do |attr|
- user.send(attr+'=', remote_user[attr])
- end
-
- if remote_user['uuid'][-22..-1] == '-tpzed-000000000000000'
- user.first_name = "root"
- user.last_name = "from cluster #{remote_user_prefix}"
- end
-
- begin
- user.save!
- rescue ActiveRecord::RecordInvalid, ActiveRecord::RecordNotUnique
- Rails.logger.debug("remote user #{remote_user['uuid']} already exists, retrying...")
- # Some other request won the race: retry fetching the user record.
- user = User.find_by_uuid(remote_user['uuid'])
- if !user
- Rails.logger.warn("cannot find or create remote user #{remote_user['uuid']}")
- return nil
- end
- end
-
- if user.is_invited && !remote_user['is_invited']
- # Remote user is not "invited" state, they should be unsetup, which
- # also makes them inactive.
- user.unsetup
- else
- if !user.is_invited && remote_user['is_invited'] and
- (remote_user_prefix == Rails.configuration.Login.LoginCluster or
- Rails.configuration.Users.AutoSetupNewUsers or
- Rails.configuration.Users.NewUsersAreActive or
- Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
- user.setup
- end
-
- if !user.is_active && remote_user['is_active'] && user.is_invited and
- (remote_user_prefix == Rails.configuration.Login.LoginCluster or
- Rails.configuration.Users.NewUsersAreActive or
- Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
- user.update_attributes!(is_active: true)
- elsif user.is_active && !remote_user['is_active']
- user.update_attributes!(is_active: false)
- end
-
- if remote_user_prefix == Rails.configuration.Login.LoginCluster and
- user.is_active and
- user.is_admin != remote_user['is_admin']
- # Remote cluster controls our user database, including the
- # admin flag.
- user.update_attributes!(is_admin: remote_user['is_admin'])
- end
+ if remote_user && remote_user_uuid != anonymous_user_uuid
+ # Sync user record if we loaded a remote user.
+ user = User.update_remote_user remote_user
end
# If stored_secret is set, we save stored_secret in the database
@@ -426,15 +403,24 @@ class ApiClientAuthorization < ArvadosModel
end
rescue ActiveRecord::RecordNotUnique
Rails.logger.debug("cached remote token #{token_uuid} already exists, retrying...")
- # Some other request won the race: retry just once before erroring out
- if (retries += 1) <= 1
+ # Another request won the race (trying to find_or_create the
+ # same token UUID) ...and/or... there is an expired entry with
+ # the same secret but a different UUID (e.g., the token is an
+ # OIDC access token and [a] our database has an expired cached
+ # row that was not used above, and [b] the remote cluster had
+ # deleted its expired cached row so it assigned a new UUID).
+ #
+      # Delete the conflicting row, if any. Retry twice (in case we
+ # hit both of those situations at once), then give up.
+ if (retries += 1) <= 2
+ ApiClientAuthorization.where('api_token=? and uuid<>?', stored_secret, token_uuid).delete_all
retry
else
Rails.logger.warn("cannot find or create cached remote token #{token_uuid}")
return nil
end
end
- auth.update_attributes!(user: user,
+ auth.update!(user: user,
api_token: stored_secret,
api_client_id: 0,
scopes: scopes,
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index c2725506c0..9ee2cca410 100644
--- a/services/api/app/models/arvados_model.rb
+++ b/services/api/app/models/arvados_model.rb
@@ -24,6 +24,7 @@ class ArvadosModel < ApplicationRecord
before_destroy :ensure_owner_uuid_is_permitted
before_destroy :ensure_permission_to_destroy
before_create :update_modified_by_fields
+ before_create :add_uuid_to_name, :if => Proc.new { @_add_uuid_to_name }
before_update :maybe_update_modified_by_fields
after_create :log_create
after_update :log_update
@@ -37,9 +38,9 @@ class ArvadosModel < ApplicationRecord
# user.uuid==object.owner_uuid.
has_many(:permissions,
->{where(link_class: 'permission')},
- foreign_key: :head_uuid,
+ foreign_key: 'head_uuid',
class_name: 'Link',
- primary_key: :uuid)
+ primary_key: 'uuid')
# If async is true at create or update, permission graph
# update is deferred allowing making multiple calls without the performance
@@ -145,7 +146,7 @@ class ArvadosModel < ApplicationRecord
super(permit_attribute_params(raw_params), *args)
end
- def update_attributes raw_params={}, *args
+ def update raw_params={}, *args
super(self.class.permit_attribute_params(raw_params), *args)
end
@@ -156,7 +157,7 @@ class ArvadosModel < ApplicationRecord
end
def self.searchable_columns operator
- textonly_operator = !operator.match(/[<=>]/)
+ textonly_operator = !operator.match(/[<=>]/) && !operator.in?(['in', 'not in'])
self.columns.select do |col|
case col.type
when :string, :text
@@ -464,26 +465,24 @@ class ArvadosModel < ApplicationRecord
end
end
+ return self if sql_conds == nil
self.where(sql_conds,
user_uuids: all_user_uuids.collect{|c| c["target_uuid"]},
permission_link_classes: ['permission'])
end
def save_with_unique_name!
- uuid_was = uuid
- name_was = name
max_retries = 2
transaction do
conn = ActiveRecord::Base.connection
conn.exec_query 'SAVEPOINT save_with_unique_name'
begin
save!
+ conn.exec_query 'RELEASE SAVEPOINT save_with_unique_name'
rescue ActiveRecord::RecordNotUnique => rn
raise if max_retries == 0
max_retries -= 1
- conn.exec_query 'ROLLBACK TO SAVEPOINT save_with_unique_name'
-
# Dig into the error to determine if it is specifically calling out a
# (owner_uuid, name) uniqueness violation. In this specific case, and
# the client requested a unique name with ensure_unique_name==true,
@@ -501,27 +500,23 @@ class ArvadosModel < ApplicationRecord
detail = err.result.error_field(PG::Result::PG_DIAG_MESSAGE_DETAIL)
raise unless /^Key \(owner_uuid, name\)=\([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}, .*?\) already exists\./.match detail
- new_name = "#{name_was} (#{db_current_time.utc.iso8601(3)})"
- if new_name == name
- # If the database is fast enough to do two attempts in the
- # same millisecond, we need to wait to ensure we try a
- # different timestamp on each attempt.
- sleep 0.002
- new_name = "#{name_was} (#{db_current_time.utc.iso8601(3)})"
- end
+ conn.exec_query 'ROLLBACK TO SAVEPOINT save_with_unique_name'
- self[:name] = new_name
- if uuid_was.nil? && !uuid.nil?
+ if uuid_was.nil?
+ # new record, the uuid caused a name collision (very
+ # unlikely but possible), so generate new uuid
self[:uuid] = nil
if self.is_a? Collection
- # Reset so that is assigned to the new UUID
+ # Also needs to be reset
self[:current_version_uuid] = nil
end
+ # need to adjust the name after the uuid has been generated
+ add_uuid_to_make_unique_name
+ else
+ # existing record, just update the name directly.
+ add_uuid_to_name
end
- conn.exec_query 'SAVEPOINT save_with_unique_name'
retry
- ensure
- conn.exec_query 'RELEASE SAVEPOINT save_with_unique_name'
end
end
end
@@ -581,6 +576,26 @@ class ArvadosModel < ApplicationRecord
*ft[:param_out])
end
+ @_add_uuid_to_name = false
+ def add_uuid_to_make_unique_name
+ @_add_uuid_to_name = true
+ end
+
+ def add_uuid_to_name
+ # Incorporate the random part of the UUID into the name. This
+ # lets us prevent name collision but the part we add to the name
+ # is still somewhat meaningful (instead of generating a second
+ # random meaningless string).
+ #
+ # Because ArvadosModel is an abstract class and assign_uuid is
+ # part of HasUuid (which is included by the other concrete
+ # classes) the assign_uuid hook gets added (and run) after this
+ # one. So we need to call assign_uuid here to make sure we have a
+ # uuid.
+ assign_uuid
+ self.name = "#{self.name[0..236]} (#{self.uuid[-15..-1]})"
+ end
+
protected
def self.deep_sort_hash(x)
@@ -939,8 +954,6 @@ class ArvadosModel < ApplicationRecord
# hook.
def fill_container_defaults_after_find
fill_container_defaults
- set_attribute_was('runtime_constraints', runtime_constraints)
- set_attribute_was('scheduling_parameters', scheduling_parameters)
clear_changes_information
end
@@ -951,6 +964,10 @@ class ArvadosModel < ApplicationRecord
# value in the database to an implicit zero/false value in an update
# request.
def fill_container_defaults
+ # Make sure this is correctly sorted by key, because we merge in
+    # whatever is in the database on top of it; this will be the order
+ # that gets used downstream rather than the order the keys appear
+ # in the database.
self.runtime_constraints = {
'API' => false,
'cuda' => {
@@ -958,6 +975,7 @@ class ArvadosModel < ApplicationRecord
'driver_version' => '',
'hardware_capability' => '',
},
+ 'keep_cache_disk' => 0,
'keep_cache_ram' => 0,
'ram' => 0,
'vcpus' => 0,
@@ -966,6 +984,7 @@ class ArvadosModel < ApplicationRecord
'max_run_time' => 0,
'partitions' => [],
'preemptible' => false,
+ 'supervisor' => false,
}.merge(attributes['scheduling_parameters'] || {})
end
diff --git a/services/api/app/models/authorized_key.rb b/services/api/app/models/authorized_key.rb
index a5c5081c40..8aefa8db81 100644
--- a/services/api/app/models/authorized_key.rb
+++ b/services/api/app/models/authorized_key.rb
@@ -9,7 +9,11 @@ class AuthorizedKey < ArvadosModel
before_create :permission_to_set_authorized_user_uuid
before_update :permission_to_set_authorized_user_uuid
- belongs_to :authorized_user, :foreign_key => :authorized_user_uuid, :class_name => 'User', :primary_key => :uuid
+ belongs_to :authorized_user,
+ foreign_key: 'authorized_user_uuid',
+ class_name: 'User',
+ primary_key: 'uuid',
+ optional: true
validate :public_key_must_be_unique
@@ -37,17 +41,11 @@ class AuthorizedKey < ArvadosModel
def public_key_must_be_unique
if self.public_key
- valid_key = SSHKey.valid_ssh_public_key? self.public_key
-
- if not valid_key
- errors.add(:public_key, "does not appear to be a valid ssh-rsa or dsa public key")
- else
- # Valid if no other rows have this public key
- if self.class.where('uuid != ? and public_key like ?',
- uuid || '', "%#{self.public_key}%").any?
- errors.add(:public_key, "already exists in the database, use a different key.")
- return false
- end
+ # Valid if no other rows have this public key
+ if self.class.where('uuid != ? and public_key like ?',
+ uuid || '', "%#{self.public_key}%").any?
+ errors.add(:public_key, "already exists in the database, use a different key.")
+ return false
end
end
return true
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index b4660dbd35..16e85c0dd9 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -329,17 +329,7 @@ class Collection < ArvadosModel
end
def sync_past_versions
- updates = self.syncable_updates
- Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_before_last_save, self.uuid_before_last_save).each do |c|
- c.attributes = updates
- # Use a different validation context to skip the 'past_versions_cannot_be_updated'
- # validator, as on this case it is legal to update some fields.
- leave_modified_by_user_alone do
- leave_modified_at_alone do
- c.save(context: :update_old_versions)
- end
- end
- end
+ Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_before_last_save, self.uuid_before_last_save).update_all self.syncable_updates
end
def versionable_updates?(attrs)
diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb
index 43af0721c4..ee338b81ff 100644
--- a/services/api/app/models/container.rb
+++ b/services/api/app/models/container.rb
@@ -5,7 +5,7 @@
require 'log_reuse_info'
require 'whitelist_update'
require 'safe_json'
-require 'update_priority'
+require 'update_priorities'
class Container < ArvadosModel
include ArvadosModelUpdates
@@ -50,11 +50,16 @@ class Container < ArvadosModel
before_save :clear_runtime_status_when_queued
after_save :update_cr_logs
after_save :handle_completed
- after_save :propagate_priority
- after_commit { UpdatePriority.run_update_thread }
- has_many :container_requests, :foreign_key => :container_uuid, :class_name => 'ContainerRequest', :primary_key => :uuid
- belongs_to :auth, :class_name => 'ApiClientAuthorization', :foreign_key => :auth_uuid, :primary_key => :uuid
+ has_many :container_requests,
+ class_name: 'ContainerRequest',
+ foreign_key: 'container_uuid',
+ primary_key: 'uuid'
+ belongs_to :auth,
+ class_name: 'ApiClientAuthorization',
+ foreign_key: 'auth_uuid',
+ primary_key: 'uuid',
+ optional: true
api_accessible :user, extend: :common do |t|
t.add :command
@@ -83,6 +88,8 @@ class Container < ArvadosModel
t.add :interactive_session_started
t.add :output_storage_classes
t.add :output_properties
+ t.add :cost
+ t.add :subrequests_cost
end
# Supported states for a container
@@ -129,35 +136,8 @@ class Container < ArvadosModel
# priority of a user-submitted request is a function of
# user-assigned priority and request creation time.
def update_priority!
- return if ![Queued, Locked, Running].include?(state)
- p = ContainerRequest.
- where('container_uuid=? and priority>0', uuid).
- includes(:requesting_container).
- lock(true).
- map do |cr|
- if cr.requesting_container
- cr.requesting_container.priority
- else
- (cr.priority << 50) - (cr.created_at.to_time.to_f * 1000).to_i
- end
- end.max || 0
- update_attributes!(priority: p)
- end
-
- def propagate_priority
- return true unless saved_change_to_priority?
- act_as_system_user do
- # Update the priority of child container requests to match new
- # priority of the parent container (ignoring requests with no
- # container assigned, because their priority doesn't matter).
- ContainerRequest.
- where(requesting_container_uuid: self.uuid,
- state: ContainerRequest::Committed).
- where('container_uuid is not null').
- includes(:container).
- map(&:container).
- map(&:update_priority!)
- end
+ update_priorities uuid
+ reload
end
# Create a new container (or find an existing one) to satisfy the
@@ -225,6 +205,9 @@ class Container < ArvadosModel
if rc['keep_cache_ram'] == 0
rc['keep_cache_ram'] = Rails.configuration.Containers.DefaultKeepCacheRAM
end
+ if rc['keep_cache_disk'] == 0 and rc['keep_cache_ram'] == 0
+ rc['keep_cache_disk'] = bound_keep_cache_disk(rc['ram'])
+ end
rc
end
@@ -291,21 +274,55 @@ class Container < ArvadosModel
candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
- if attrs[:runtime_constraints]['cuda'].nil?
- attrs[:runtime_constraints]['cuda'] = {
- 'device_count' => 0,
- 'driver_version' => '',
- 'hardware_capability' => '',
- }
- end
- resolved_runtime_constraints = [resolve_runtime_constraints(attrs[:runtime_constraints])]
- if resolved_runtime_constraints[0]['cuda']['device_count'] == 0
- # If no CUDA requested, extend search to include older container
- # records that don't have a 'cuda' section in runtime_constraints
- resolved_runtime_constraints << resolved_runtime_constraints[0].except('cuda')
- end
-
- candidates = candidates.where_serialized(:runtime_constraints, resolved_runtime_constraints, md5: true, multivalue: true)
+ resolved_runtime_constraints = resolve_runtime_constraints(attrs[:runtime_constraints])
+ # Ideally we would completely ignore Keep cache constraints when making
+ # reuse considerations, but our database structure makes that impractical.
+ # The best we can do is generate a search that matches on all likely values.
+ runtime_constraint_variations = {
+ keep_cache_disk: [
+ # Check for constraints without keep_cache_disk
+ # (containers that predate the constraint)
+ nil,
+ # Containers that use keep_cache_ram instead
+ 0,
+ # The default value
+ bound_keep_cache_disk(resolved_runtime_constraints['ram']),
+ # The minimum default bound
+ bound_keep_cache_disk(0),
+ # The maximum default bound (presumably)
+ bound_keep_cache_disk(1 << 60),
+ # The requested value
+ resolved_runtime_constraints.delete('keep_cache_disk'),
+ ].uniq,
+ keep_cache_ram: [
+ # Containers that use keep_cache_disk instead
+ 0,
+ # The default value
+ Rails.configuration.Containers.DefaultKeepCacheRAM,
+ # The requested value
+ resolved_runtime_constraints.delete('keep_cache_ram'),
+ ].uniq,
+ }
+ resolved_cuda = resolved_runtime_constraints['cuda']
+ if resolved_cuda.nil? or resolved_cuda['device_count'] == 0
+ runtime_constraint_variations[:cuda] = [
+ # Check for constraints without cuda
+ # (containers that predate the constraint)
+ nil,
+ # The default "don't need CUDA" value
+ {
+ 'device_count' => 0,
+ 'driver_version' => '',
+ 'hardware_capability' => '',
+ },
+ # The requested value
+ resolved_runtime_constraints.delete('cuda')
+ ].uniq
+ end
+ reusable_runtime_constraints = hash_product(**runtime_constraint_variations)
+ .map { |v| resolved_runtime_constraints.merge(v) }
+
+ candidates = candidates.where_serialized(:runtime_constraints, reusable_runtime_constraints, md5: true, multivalue: true)
log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
log_reuse_info { "checking for state=Complete with readable output and log..." }
@@ -333,7 +350,7 @@ class Container < ArvadosModel
# Check for non-failing Running candidates and return the most likely to finish sooner.
log_reuse_info { "checking for state=Running..." }
running = candidates.where(state: Running).
- where("(runtime_status->'error') is null").
+ where("(runtime_status->'error') is null and priority > 0").
order('progress desc, started_at asc').
limit(1).first
if running
@@ -347,10 +364,15 @@ class Container < ArvadosModel
locked_or_queued = candidates.
where("state IN (?)", [Locked, Queued]).
order('state asc, priority desc, created_at asc').
- limit(1).first
- if locked_or_queued
- log_reuse_info { "done, reusing container #{locked_or_queued.uuid} with state=#{locked_or_queued.state}" }
- return locked_or_queued
+ limit(1)
+ if !attrs[:scheduling_parameters]['preemptible']
+ locked_or_queued = locked_or_queued.
+ where("not ((scheduling_parameters::jsonb)->>'preemptible')::boolean")
+ end
+ chosen = locked_or_queued.first
+ if chosen
+ log_reuse_info { "done, reusing container #{chosen.uuid} with state=#{chosen.state}" }
+ return chosen
else
log_reuse_info { "have no containers in Locked or Queued state" }
end
@@ -364,7 +386,7 @@ class Container < ArvadosModel
if self.state != Queued
raise LockFailedError.new("cannot lock when #{self.state}")
end
- self.update_attributes!(state: Locked)
+ self.update!(state: Locked)
end
end
@@ -382,7 +404,7 @@ class Container < ArvadosModel
if self.state != Locked
raise InvalidStateTransitionError.new("cannot unlock when #{self.state}")
end
- self.update_attributes!(state: Queued)
+ self.update!(state: Queued)
end
end
@@ -430,6 +452,31 @@ class Container < ArvadosModel
protected
+ def self.bound_keep_cache_disk(value)
+ value ||= 0
+ min_value = 2 << 30
+ max_value = 32 << 30
+ if value < min_value
+ min_value
+ elsif value > max_value
+ max_value
+ else
+ value
+ end
+ end
+
+ def self.hash_product(**kwargs)
+ # kwargs is a hash that maps parameters to an array of values.
+ # This function enumerates every possible hash where each key has one of
+ # the values from its array.
+ # The output keys are strings since that's what container hash attributes
+ # want.
+ # A nil value yields a hash without that key.
+ [[:_, nil]].product(
+ *kwargs.map { |(key, values)| [key.to_s].product(values) },
+ ).map { |param_pairs| Hash[param_pairs].compact }
+ end
+
def fill_field_defaults
self.state ||= Queued
self.environment ||= {}
@@ -478,8 +525,9 @@ class Container < ArvadosModel
def validate_change
permitted = [:state]
- progress_attrs = [:progress, :runtime_status, :log, :output, :output_properties, :exit_code]
final_attrs = [:finished_at]
+ progress_attrs = [:progress, :runtime_status, :subrequests_cost, :cost,
+ :log, :output, :output_properties, :exit_code]
if self.new_record?
permitted.push(:owner_uuid, :command, :container_image, :cwd,
@@ -516,7 +564,7 @@ class Container < ArvadosModel
when Running
permitted.push :finished_at, *progress_attrs
when Queued, Locked
- permitted.push :finished_at, :log, :runtime_status
+ permitted.push :finished_at, :log, :runtime_status, :cost
end
else
@@ -583,7 +631,7 @@ class Container < ArvadosModel
# each requesting CR.
return if self.final? || !saved_change_to_log?
leave_modified_by_user_alone do
- ContainerRequest.where(container_uuid: self.uuid).each do |cr|
+ ContainerRequest.where(container_uuid: self.uuid, state: ContainerRequest::Committed).each do |cr|
cr.update_collections(container: self, collections: ['log'])
cr.save!
end
@@ -601,7 +649,7 @@ class Container < ArvadosModel
# ensure the token doesn't validate later in the same
# transaction (e.g., in a test case) by satisfying expires_at >
# transaction timestamp.
- self.auth.andand.update_attributes(expires_at: db_transaction_time)
+ self.auth.andand.update(expires_at: db_transaction_time)
self.auth = nil
return
elsif self.auth
@@ -694,12 +742,57 @@ class Container < ArvadosModel
self.with_lock do
act_as_system_user do
if self.state == Cancelled
- retryable_requests = ContainerRequest.where("container_uuid = ? and priority > 0 and state = 'Committed' and container_count < container_count_max", uuid)
+ # Cancelled means the container didn't run to completion.
+ # This happens either because it was cancelled by the user
+ # or because there was an infrastructure failure. We want
+ # to retry infrastructure failures automatically.
+ #
+        # Search for live container requests to determine if we
+ # should retry the container.
+ retryable_requests = ContainerRequest.
+ joins('left outer join containers as requesting_container on container_requests.requesting_container_uuid = requesting_container.uuid').
+ where("container_requests.container_uuid = ? and "+
+ "container_requests.priority > 0 and "+
+ "container_requests.owner_uuid not in (select group_uuid from trashed_groups) and "+
+ "(requesting_container.priority is null or (requesting_container.state = 'Running' and requesting_container.priority > 0)) and "+
+ "container_requests.state = 'Committed' and "+
+ "container_requests.container_count < container_requests.container_count_max", uuid).
+ order('container_requests.uuid asc')
else
retryable_requests = []
end
if retryable_requests.any?
+ scheduling_parameters = {
+ # partitions: empty if any are empty, else the union of all parameters
+ "partitions": retryable_requests
+ .map { |req| req.scheduling_parameters["partitions"] || [] }
+ .reduce { |cur, new| (cur.empty? or new.empty?) ? [] : (cur | new) },
+
+ # preemptible: true if all are true, else false
+ "preemptible": retryable_requests
+ .map { |req| req.scheduling_parameters["preemptible"] }
+ .all?,
+
+          # supervisor: true if any are true, else false
+ "supervisor": retryable_requests
+ .map { |req| req.scheduling_parameters["supervisor"] }
+ .any?,
+
+ # max_run_time: 0 if any are 0 (unlimited), else the maximum
+ "max_run_time": retryable_requests
+ .map { |req| req.scheduling_parameters["max_run_time"] || 0 }
+ .reduce do |cur, new|
+ if cur == 0 or new == 0
+ 0
+ elsif new > cur
+ new
+ else
+ cur
+ end
+ end,
+ }
+
c_attrs = {
command: self.command,
cwd: self.cwd,
@@ -708,7 +801,7 @@ class Container < ArvadosModel
container_image: self.container_image,
mounts: self.mounts,
runtime_constraints: self.runtime_constraints,
- scheduling_parameters: self.scheduling_parameters,
+ scheduling_parameters: scheduling_parameters,
secret_mounts: prev_secret_mounts,
runtime_token: prev_runtime_token,
runtime_user_uuid: self.runtime_user_uuid,
@@ -719,6 +812,7 @@ class Container < ArvadosModel
cr.with_lock do
leave_modified_by_user_alone do
# Use row locking because this increments container_count
+ cr.cumulative_cost += self.cost + self.subrequests_cost
cr.container_uuid = c.uuid
cr.save!
end
@@ -736,19 +830,19 @@ class Container < ArvadosModel
# Cancel outstanding container requests made by this container.
ContainerRequest.
- includes(:container).
where(requesting_container_uuid: uuid,
- state: ContainerRequest::Committed).each do |cr|
+ state: ContainerRequest::Committed).
+ in_batches(of: 15).each_record do |cr|
leave_modified_by_user_alone do
- cr.update_attributes!(priority: 0)
- cr.container.reload
- if cr.container.state == Container::Queued || cr.container.state == Container::Locked
+ cr.set_priority_zero
+ container_state = Container.where(uuid: cr.container_uuid).pluck(:state).first
+ if container_state == Container::Queued || container_state == Container::Locked
# If the child container hasn't started yet, finalize the
# child CR now instead of leaving it "on hold", i.e.,
# Queued with priority 0. (OTOH, if the child is already
# running, leave it alone so it can get cancelled the
# usual way, get a copy of the log collection, etc.)
- cr.update_attributes!(state: ContainerRequest::Final)
+ cr.update!(state: ContainerRequest::Final)
end
end
end
diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb
index 9116035905..f5789f31f6 100644
--- a/services/api/app/models/container_request.rb
+++ b/services/api/app/models/container_request.rb
@@ -12,12 +12,15 @@ class ContainerRequest < ArvadosModel
include CommonApiTemplate
include WhitelistUpdate
- belongs_to :container, foreign_key: :container_uuid, primary_key: :uuid
- belongs_to :requesting_container, {
- class_name: 'Container',
- foreign_key: :requesting_container_uuid,
- primary_key: :uuid,
- }
+ belongs_to :container,
+ foreign_key: 'container_uuid',
+ primary_key: 'uuid',
+ optional: true
+ belongs_to :requesting_container,
+ class_name: 'Container',
+ foreign_key: 'requesting_container_uuid',
+ primary_key: 'uuid',
+ optional: true
# Posgresql JSONB columns should NOT be declared as serialized, Rails 5
# already know how to properly treat them.
@@ -33,6 +36,7 @@ class ContainerRequest < ArvadosModel
serialize :scheduling_parameters, Hash
after_find :fill_container_defaults_after_find
+ after_initialize { @state_was_when_initialized = self.state_was } # see finalize_if_needed
before_validation :fill_field_defaults, :if => :new_record?
before_validation :fill_container_defaults
validates :command, :container_image, :output_path, :cwd, :presence => true
@@ -80,6 +84,7 @@ class ContainerRequest < ArvadosModel
t.add :use_existing
t.add :output_storage_classes
t.add :output_properties
+ t.add :cumulative_cost
end
# Supported states for a container request
@@ -162,7 +167,7 @@ class ContainerRequest < ArvadosModel
end
elsif state == Committed
# Behave as if the container is cancelled
- update_attributes!(state: Final)
+ update!(state: Final)
end
return true
end
@@ -173,8 +178,39 @@ class ContainerRequest < ArvadosModel
def finalize!
container = Container.find_by_uuid(container_uuid)
if !container.nil?
- update_collections(container: container)
+ # We don't want to add the container cost if the container was
+ # already finished when this CR was committed. But we are
+ # running in an after_save hook after a lock/reload, so
+ # state_was has already been updated to Committed regardless.
+ # Hence the need for @state_was_when_initialized.
+ if @state_was_when_initialized == Committed
+ # Add the final container cost to our cumulative cost (which
+ # may already be non-zero from previous attempts if
+ # container_count_max > 1).
+ self.cumulative_cost += container.cost + container.subrequests_cost
+ end
+
+ # Add our cumulative cost to the subrequests_cost of the
+ # requesting container, if any.
+ if self.requesting_container_uuid
+ Container.where(
+ uuid: self.requesting_container_uuid,
+ state: Container::Running,
+ ).each do |c|
+ c.subrequests_cost += self.cumulative_cost
+ c.save!
+ end
+ end
+ update_collections(container: container)
+ # update_collections makes a log collection that includes all of the logs
+ # for all of the containers associated with this request. For requests
+ # that are retried, this is the primary way users can get logs for
+ # failed containers.
+ # The code below makes a log collection that is a verbatim copy of the
+ # container's logs. This is required for container reuse: a container
+ # will not be reused if the owner cannot read a collection with its logs.
+ # See the "readable log" section of Container.find_reusable().
if container.state == Container::Complete
log_col = Collection.where(portable_data_hash: container.log).first
if log_col
@@ -195,10 +231,17 @@ class ContainerRequest < ArvadosModel
end
end
end
- update_attributes!(state: Final)
+ update!(state: Final)
end
def update_collections(container:, collections: ['log', 'output'])
+
+ # Check if parent is frozen or trashed, in which case it isn't
+ # valid to create new collections in the project, so return
+ # without creating anything.
+ owner = Group.find_by_uuid(self.owner_uuid)
+ return if owner && !owner.admin_change_permitted
+
collections.each do |out_type|
pdh = container.send(out_type)
next if pdh.nil?
@@ -267,6 +310,10 @@ class ContainerRequest < ArvadosModel
super - ["mounts", "secret_mounts", "secret_mounts_md5", "runtime_token", "output_storage_classes"]
end
+ def set_priority_zero
+ self.update!(priority: 0) if self.priority > 0 && self.state != Final
+ end
+
protected
def fill_field_defaults
@@ -294,6 +341,10 @@ class ContainerRequest < ArvadosModel
return false
end
if state_changed? and state == Committed and container_uuid.nil?
+ if self.command.length > 0 and self.command[0] == "arvados-cwl-runner"
+ # Special case, arvados-cwl-runner processes are always considered "supervisors"
+ self.scheduling_parameters['supervisor'] = true
+ end
while true
c = Container.resolve(self)
c.lock!
@@ -311,10 +362,11 @@ class ContainerRequest < ArvadosModel
self.container_count += 1
return if self.container_uuid_was.nil?
- old_container = Container.find_by_uuid(self.container_uuid_was)
- return if old_container.nil?
+ old_container_uuid = self.container_uuid_was
+ old_container_log = Container.where(uuid: old_container_uuid).pluck(:log).first
+ return if old_container_log.nil?
- old_logs = Collection.where(portable_data_hash: old_container.log).first
+ old_logs = Collection.where(portable_data_hash: old_container_log).first
return if old_logs.nil?
log_coll = self.log_uuid.nil? ? nil : Collection.where(uuid: self.log_uuid).first
@@ -329,7 +381,7 @@ class ContainerRequest < ArvadosModel
# copy logs from old container into CR's log collection
src = Arv::Collection.new(old_logs.manifest_text)
dst = Arv::Collection.new(log_coll.manifest_text)
- dst.cp_r("./", "log for container #{old_container.uuid}", src)
+ dst.cp_r("./", "log for container #{old_container_uuid}", src)
manifest = dst.manifest_text
log_coll.assign_attributes(
@@ -418,8 +470,9 @@ class ContainerRequest < ArvadosModel
def validate_scheduling_parameters
if self.state == Committed
- if scheduling_parameters.include? 'partitions' and
- (!scheduling_parameters['partitions'].is_a?(Array) ||
+ if scheduling_parameters.include?('partitions') and
+ !scheduling_parameters['partitions'].nil? and
+ (!scheduling_parameters['partitions'].is_a?(Array) ||
scheduling_parameters['partitions'].reject{|x| !x.is_a?(String)}.size !=
scheduling_parameters['partitions'].size)
errors.add :scheduling_parameters, "partitions must be an array of strings"
@@ -461,7 +514,7 @@ class ContainerRequest < ArvadosModel
case self.state
when Committed
- permitted.push :priority, :container_count_max, :container_uuid
+ permitted.push :priority, :container_count_max, :container_uuid, :cumulative_cost
if self.priority.nil?
self.errors.add :priority, "cannot be nil"
@@ -478,7 +531,7 @@ class ContainerRequest < ArvadosModel
when Final
if self.state_was == Committed
# "Cancel" means setting priority=0, state=Committed
- permitted.push :priority
+ permitted.push :priority, :cumulative_cost
if current_user.andand.is_admin
permitted.push :output_uuid, :log_uuid
@@ -520,15 +573,8 @@ class ContainerRequest < ArvadosModel
def update_priority
return unless saved_change_to_state? || saved_change_to_priority? || saved_change_to_container_uuid?
- act_as_system_user do
- Container.
- where('uuid in (?)', [container_uuid_before_last_save, self.container_uuid].compact).
- map(&:update_priority!)
- end
- end
-
- def set_priority_zero
- self.update_attributes!(priority: 0) if self.state != Final
+ update_priorities container_uuid_before_last_save if !container_uuid_before_last_save.nil? and container_uuid_before_last_save != self.container_uuid
+ update_priorities self.container_uuid if self.container_uuid
end
def set_requesting_container_uuid
diff --git a/services/api/app/models/group.rb b/services/api/app/models/group.rb
index 0c36a048dc..d4c81fe9d1 100644
--- a/services/api/app/models/group.rb
+++ b/services/api/app/models/group.rb
@@ -4,6 +4,7 @@
require 'can_be_an_owner'
require 'trashable'
+require 'update_priorities'
class Group < ArvadosModel
include HasUuid
@@ -48,12 +49,19 @@ class Group < ArvadosModel
t.add :can_manage
end
+  # Check if admins are allowed to make changes to the project, e.g. it
+ # isn't trashed or frozen.
+ def admin_change_permitted
+ !(FrozenGroup.where(uuid: self.uuid).any? || TrashedGroup.where(group_uuid: self.uuid).any?)
+ end
+
protected
def self.attributes_required_columns
super.merge(
'can_write' => ['owner_uuid', 'uuid'],
'can_manage' => ['owner_uuid', 'uuid'],
+ 'writable_by' => ['owner_uuid', 'uuid'],
)
end
@@ -155,56 +163,70 @@ class Group < ArvadosModel
# Remove groups that don't belong from trash
# Add/update groups that do belong in the trash
- temptable = "group_subtree_#{rand(2**64).to_s(10)}"
- ActiveRecord::Base.connection.exec_query(
- "create temporary table #{temptable} on commit drop " +
- "as select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp)",
+ frozen_descendants = ActiveRecord::Base.connection.exec_query(%{
+with temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp))
+ select uuid from frozen_groups, temptable where uuid = target_uuid
+},
"Group.update_trash.select",
- [[nil, self.uuid],
- [nil, TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at],
- [nil, self.trash_at]])
- frozen_descendants = ActiveRecord::Base.connection.exec_query(
- "select uuid from frozen_groups, #{temptable} where uuid = target_uuid",
- "Group.update_trash.check_frozen")
+ [self.uuid,
+ TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at,
+ self.trash_at])
if frozen_descendants.any?
raise ArgumentError.new("cannot trash project containing frozen project #{frozen_descendants[0]["uuid"]}")
end
- ActiveRecord::Base.connection.exec_delete(
- "delete from trashed_groups where group_uuid in (select target_uuid from #{temptable} where trash_at is NULL)",
- "Group.update_trash.delete")
- ActiveRecord::Base.connection.exec_query(
- "insert into trashed_groups (group_uuid, trash_at) "+
- "select target_uuid as group_uuid, trash_at from #{temptable} where trash_at is not NULL " +
- "on conflict (group_uuid) do update set trash_at=EXCLUDED.trash_at",
- "Group.update_trash.insert")
+
+ ActiveRecord::Base.connection.exec_query(%{
+with temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp)),
+
+delete_rows as (delete from trashed_groups where group_uuid in (select target_uuid from temptable where trash_at is NULL)),
+
+insert_rows as (insert into trashed_groups (group_uuid, trash_at)
+ select target_uuid as group_uuid, trash_at from temptable where trash_at is not NULL
+ on conflict (group_uuid) do update set trash_at=EXCLUDED.trash_at)
+
+select container_uuid from container_requests where
+ owner_uuid in (select target_uuid from temptable) and
+ requesting_container_uuid is NULL and state = 'Committed' and container_uuid is not NULL
+},
+ "Group.update_trash.select",
+ [self.uuid,
+ TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at,
+ self.trash_at]).each do |container_uuid|
+ update_priorities container_uuid["container_uuid"]
+ end
end
def update_frozen
return unless saved_change_to_frozen_by_uuid? || saved_change_to_owner_uuid?
- temptable = "group_subtree_#{rand(2**64).to_s(10)}"
- ActiveRecord::Base.connection.exec_query(
- "create temporary table #{temptable} on commit drop as select * from project_subtree_with_is_frozen($1,$2)",
- "Group.update_frozen.select",
- [[nil, self.uuid],
- [nil, !self.frozen_by_uuid.nil?]])
+
if frozen_by_uuid
- rows = ActiveRecord::Base.connection.exec_query(
- "select cr.uuid, cr.state from container_requests cr, #{temptable} frozen " +
- "where cr.owner_uuid = frozen.uuid and frozen.is_frozen " +
- "and cr.state not in ($1, $2) limit 1",
- "Group.update_frozen.check_container_requests",
- [[nil, ContainerRequest::Uncommitted],
- [nil, ContainerRequest::Final]])
+ rows = ActiveRecord::Base.connection.exec_query(%{
+with temptable as (select * from project_subtree_with_is_frozen($1,$2))
+
+select cr.uuid, cr.state from container_requests cr, temptable frozen
+ where cr.owner_uuid = frozen.uuid and frozen.is_frozen
+ and cr.state not in ($3, $4) limit 1
+},
+ "Group.update_frozen.check_container_requests",
+ [self.uuid,
+ !self.frozen_by_uuid.nil?,
+ ContainerRequest::Uncommitted,
+ ContainerRequest::Final])
if rows.any?
raise ArgumentError.new("cannot freeze project containing container request #{rows.first['uuid']} with state = #{rows.first['state']}")
end
end
- ActiveRecord::Base.connection.exec_delete(
- "delete from frozen_groups where uuid in (select uuid from #{temptable} where not is_frozen)",
- "Group.update_frozen.delete")
- ActiveRecord::Base.connection.exec_query(
- "insert into frozen_groups (uuid) select uuid from #{temptable} where is_frozen on conflict do nothing",
- "Group.update_frozen.insert")
+
+ActiveRecord::Base.connection.exec_query(%{
+with temptable as (select * from project_subtree_with_is_frozen($1,$2)),
+
+delete_rows as (delete from frozen_groups where uuid in (select uuid from temptable where not is_frozen))
+
+insert into frozen_groups (uuid) select uuid from temptable where is_frozen on conflict do nothing
+}, "Group.update_frozen.update",
+ [self.uuid,
+ !self.frozen_by_uuid.nil?])
+
end
def before_ownership_change
@@ -225,11 +247,11 @@ class Group < ArvadosModel
ActiveRecord::Base.connection.exec_delete(
"delete from trashed_groups where group_uuid=$1",
"Group.clear_permissions_trash_frozen",
- [[nil, self.uuid]])
+ [self.uuid])
ActiveRecord::Base.connection.exec_delete(
"delete from frozen_groups where uuid=$1",
"Group.clear_permissions_trash_frozen",
- [[nil, self.uuid]])
+ [self.uuid])
end
def assign_name
@@ -250,7 +272,7 @@ class Group < ArvadosModel
if self.owner_uuid != system_user_uuid
raise "Owner uuid for role must be system user"
end
- raise PermissionDeniedError unless current_user.can?(manage: uuid)
+ raise PermissionDeniedError.new("role group cannot be modified without can_manage permission") unless current_user.can?(manage: uuid)
true
else
super
@@ -268,6 +290,18 @@ class Group < ArvadosModel
end
end
+ def permission_to_create
+ if !super
+ return false
+ elsif group_class == "role" &&
+ !Rails.configuration.Users.CanCreateRoleGroups &&
+ !current_user.andand.is_admin
+ raise PermissionDeniedError.new("this cluster does not allow users to create role groups")
+ else
+ return true
+ end
+ end
+
def permission_to_update
if !super
return false
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
index 37e5f455df..029a313285 100644
--- a/services/api/app/models/job.rb
+++ b/services/api/app/models/job.rb
@@ -50,7 +50,7 @@ class Job < ArvadosModel
before_create :create_disabled
before_update :update_disabled
- has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
+ has_many(:nodes, foreign_key: 'job_uuid', primary_key: 'uuid')
class SubmitIdReused < RequestError
end
@@ -107,7 +107,7 @@ class Job < ArvadosModel
end
def assert_finished
- update_attributes(finished_at: finished_at || db_current_time,
+ update(finished_at: finished_at || db_current_time,
success: success.nil? ? false : success,
running: false)
end
diff --git a/services/api/app/models/keep_disk.rb b/services/api/app/models/keep_disk.rb
index 5751c135d8..589936f845 100644
--- a/services/api/app/models/keep_disk.rb
+++ b/services/api/app/models/keep_disk.rb
@@ -40,7 +40,7 @@ class KeepDisk < ArvadosModel
end
@bypass_arvados_authorization = true
- self.update_attributes!(o.select { |k,v|
+ self.update!(o.select { |k,v|
[:bytes_total,
:bytes_free,
:is_readable,
diff --git a/services/api/app/models/link.rb b/services/api/app/models/link.rb
index 83043a56d1..2eb6b88a0c 100644
--- a/services/api/app/models/link.rb
+++ b/services/api/app/models/link.rb
@@ -14,10 +14,15 @@ class Link < ArvadosModel
validate :name_links_are_obsolete
validate :permission_to_attach_to_objects
before_update :restrict_alter_permissions
- after_update :call_update_permissions
- after_create :call_update_permissions
+ before_update :apply_max_overlapping_permissions
+ before_create :apply_max_overlapping_permissions
+ after_update :delete_overlapping_permissions
+ after_update :call_update_permissions, :if => Proc.new { @need_update_permissions }
+ after_create :call_update_permissions, :if => Proc.new { @need_update_permissions }
before_destroy :clear_permissions
+ after_destroy :delete_overlapping_permissions
after_destroy :check_permissions
+ before_save :check_need_update_permissions
api_accessible :user, extend: :common do |t|
t.add :tail_uuid
@@ -29,6 +34,58 @@ class Link < ArvadosModel
t.add :properties
end
+ PermLevel = {
+ 'can_read' => 0,
+ 'can_write' => 1,
+ 'can_manage' => 2,
+ }
+
+ def apply_max_overlapping_permissions
+ return if self.link_class != 'permission' || !PermLevel[self.name]
+ Link.
+ lock. # select ... for update
+ where(link_class: 'permission',
+ tail_uuid: self.tail_uuid,
+ head_uuid: self.head_uuid,
+ name: PermLevel.keys).
+ where('uuid <> ?', self.uuid).each do |other|
+ if PermLevel[other.name] > PermLevel[self.name]
+ self.name = other.name
+ end
+ end
+ end
+
+ def delete_overlapping_permissions
+ return if self.link_class != 'permission'
+ redundant = nil
+ if PermLevel[self.name]
+ redundant = Link.
+ lock. # select ... for update
+ where(link_class: 'permission',
+ tail_uuid: self.tail_uuid,
+ head_uuid: self.head_uuid,
+ name: PermLevel.keys).
+ where('uuid <> ?', self.uuid)
+ elsif self.name == 'can_login' &&
+ self.properties.respond_to?(:has_key?) &&
+ self.properties.has_key?('username')
+ redundant = Link.
+ lock. # select ... for update
+ where(link_class: 'permission',
+ tail_uuid: self.tail_uuid,
+ head_uuid: self.head_uuid,
+ name: 'can_login').
+ where('properties @> ?', SafeJSON.dump({'username' => self.properties['username']})).
+ where('uuid <> ?', self.uuid)
+ end
+ if redundant
+ redundant.each do |link|
+ link.clear_permissions
+ end
+ redundant.delete_all
+ end
+ end
+
def head_kind
if k = ArvadosModel::resource_class_for_uuid(head_uuid)
k.kind
@@ -133,11 +190,13 @@ class Link < ArvadosModel
'can_manage' => 3,
}
+ def check_need_update_permissions
+ @need_update_permissions = self.link_class == 'permission' && (name != name_was || new_record?)
+ end
+
def call_update_permissions
- if self.link_class == 'permission'
update_permissions tail_uuid, head_uuid, PERM_LEVEL[name], self.uuid
current_user.forget_cached_group_perms
- end
end
def clear_permissions
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index c8b463696b..f384ba582b 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -20,10 +20,14 @@ class Node < ArvadosModel
# Only a controller can figure out whether or not the current API tokens
# have access to the associated Job. They're expected to set
# job_readable=true if the Job UUID can be included in the API response.
- belongs_to(:job, foreign_key: :job_uuid, primary_key: :uuid)
+ belongs_to :job,
+ foreign_key: 'job_uuid',
+ primary_key: 'uuid',
+ optional: true
attr_accessor :job_readable
UNUSED_NODE_IP = '127.40.4.0'
+ MAX_VMS = 3
api_accessible :user, :extend => :common do |t|
t.add :hostname
@@ -158,8 +162,8 @@ class Node < ArvadosModel
LIMIT 1',
# query label:
'Node.available_slot_number',
- # [col_id, val] for $1 vars:
- [[nil, Rails.configuration.Containers.MaxComputeVMs]],
+ # bind vars:
+ [MAX_VMS],
).rows.first.andand.first
end
@@ -175,7 +179,7 @@ class Node < ArvadosModel
# as the new node. Clear the ip_address field on the stale
# nodes. Otherwise, we (via SLURM) might inadvertently connect
# to the new node using the old node's hostname.
- stale_node.update_attributes!(ip_address: nil)
+ stale_node.update!(ip_address: nil)
end
end
if hostname_before_last_save && saved_change_to_hostname?
@@ -267,7 +271,7 @@ class Node < ArvadosModel
!Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty? and
!Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname.empty?)
- (0..Rails.configuration.Containers.MaxComputeVMs-1).each do |slot_number|
+ (0..MAX_VMS-1).each do |slot_number|
hostname = hostname_for_slot(slot_number)
hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
if !File.exist? hostfile
diff --git a/services/api/app/models/pipeline_instance.rb b/services/api/app/models/pipeline_instance.rb
index 271b155aaf..0b0af8b87d 100644
--- a/services/api/app/models/pipeline_instance.rb
+++ b/services/api/app/models/pipeline_instance.rb
@@ -9,7 +9,10 @@ class PipelineInstance < ArvadosModel
serialize :components, Hash
serialize :properties, Hash
serialize :components_summary, Hash
- belongs_to :pipeline_template, :foreign_key => :pipeline_template_uuid, :primary_key => :uuid
+ belongs_to :pipeline_template,
+ foreign_key: 'pipeline_template_uuid',
+ primary_key: 'uuid',
+ optional: true
before_validation :bootstrap_components
before_validation :update_state
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 1662278cc3..5a95fb0b88 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -31,9 +31,10 @@ class User < ArvadosModel
after_update :setup_on_activate
before_create :check_auto_admin
- before_create :set_initial_username, :if => Proc.new {
- username.nil? and email
+ before_validation :set_initial_username, :if => Proc.new {
+ new_record? && email
}
+ before_create :active_is_not_nil
after_create :after_ownership_change
after_create :setup_on_activate
after_create :add_system_group_permission_link
@@ -56,8 +57,8 @@ class User < ArvadosModel
before_destroy :clear_permissions
after_destroy :remove_self_from_permissions
- has_many :authorized_keys, :foreign_key => :authorized_user_uuid, :primary_key => :uuid
- has_many :repositories, foreign_key: :owner_uuid, primary_key: :uuid
+ has_many :authorized_keys, foreign_key: 'authorized_user_uuid', primary_key: 'uuid'
+ has_many :repositories, foreign_key: 'owner_uuid', primary_key: 'uuid'
default_scope { where('redirect_to_user_uuid is null') }
@@ -104,6 +105,10 @@ class User < ArvadosModel
self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
end
+ def self.ignored_select_attributes
+ super + ["full_name", "is_invited"]
+ end
+
def groups_i_can(verb)
my_groups = self.group_permissions(VAL_FOR_PERM[verb]).keys
if verb == :read
@@ -145,10 +150,10 @@ SELECT 1 FROM #{PERMISSION_VIEW}
},
# "name" arg is a query label that appears in logs:
"user_can_query",
- [[nil, self.uuid],
- [nil, target_uuid],
- [nil, VAL_FOR_PERM[action]],
- [nil, target_owner_uuid]]
+ [self.uuid,
+ target_uuid,
+ VAL_FOR_PERM[action],
+ target_owner_uuid]
).any?
return false
end
@@ -237,7 +242,7 @@ SELECT target_uuid, perm_level
# "name" arg is a query label that appears in logs:
"User.group_permissions",
# "binds" arg is an array of [col_id, value] for '$1' vars:
- [[nil, uuid]]).
+ [uuid]).
rows.each do |group_uuid, max_p_val|
@group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
end
@@ -259,8 +264,7 @@ SELECT target_uuid, perm_level
def setup(repo_name: nil, vm_uuid: nil, send_notification_email: nil)
newly_invited = Link.where(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
- link_class: 'permission',
- name: 'can_read').empty?
+ link_class: 'permission').empty?
# Add can_read link from this user to "all users" which makes this
# user "invited", and (depending on config) a link in the opposite
@@ -308,31 +312,25 @@ SELECT target_uuid, perm_level
# delete oid_login_perms for this user
#
- # note: these permission links are obsolete, they have no effect
- # on anything and they are not created for new users.
+ # note: these permission links are obsolete anyway: they have no
+ # effect on anything and they are not created for new users.
Link.where(tail_uuid: self.email,
- link_class: 'permission',
- name: 'can_login').destroy_all
-
- # delete repo_perms for this user
- Link.where(tail_uuid: self.uuid,
- link_class: 'permission',
- name: 'can_manage').destroy_all
+ link_class: 'permission',
+ name: 'can_login').destroy_all
- # delete vm_login_perms for this user
- Link.where(tail_uuid: self.uuid,
- link_class: 'permission',
- name: 'can_login').destroy_all
-
- # delete "All users" group read permissions for this user
+ # Delete all sharing permissions so (a) the user doesn't
+ # automatically regain access to anything if re-setup in future,
+ # (b) the user doesn't appear in "currently shared with" lists
+ # shown to other users.
+ #
+ # Notably this includes the can_read -> "all users" group
+ # permission.
Link.where(tail_uuid: self.uuid,
- head_uuid: all_users_group_uuid,
- link_class: 'permission',
- name: 'can_read').destroy_all
+ link_class: 'permission').destroy_all
# delete any signatures by this user
Link.where(link_class: 'signature',
- tail_uuid: self.uuid).destroy_all
+ tail_uuid: self.uuid).destroy_all
# delete tokens for this user
ApiClientAuthorization.where(user_id: self.id).destroy_all
@@ -381,15 +379,18 @@ SELECT target_uuid, perm_level
#
if Link.where(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
- link_class: 'permission',
- name: 'can_read').any?
+ link_class: 'permission').any?
errors.add :is_active, "cannot be set to false directly, use the 'Deactivate' button on Workbench, or the 'unsetup' API call"
end
end
end
def set_initial_username(requested: false)
- if !requested.is_a?(String) || requested.empty?
+ if new_record? and requested == false and self.username != nil and self.username != ""
+ requested = self.username
+ end
+
+ if (!requested.is_a?(String) || requested.empty?) and email
email_parts = email.partition("@")
local_parts = email_parts.first.partition("+")
if email_parts.any?(&:empty?)
@@ -400,13 +401,20 @@ SELECT target_uuid, perm_level
requested = email_parts.first
end
end
- requested.sub!(/^[^A-Za-z]+/, "")
- requested.gsub!(/[^A-Za-z0-9]/, "")
- unless requested.empty?
+ if requested
+ requested.sub!(/^[^A-Za-z]+/, "")
+ requested.gsub!(/[^A-Za-z0-9]/, "")
+ end
+ unless !requested || requested.empty?
self.username = find_usable_username_from(requested)
end
end
+ def active_is_not_nil
+ self.is_active = false if self.is_active.nil?
+ self.is_admin = false if self.is_admin.nil?
+ end
+
# Move this user's (i.e., self's) owned items to new_owner_uuid and
# new_user_uuid (for things normally owned directly by the user).
#
@@ -504,14 +512,14 @@ SELECT target_uuid, perm_level
end
if redirect_to_new_user
- update_attributes!(redirect_to_user_uuid: new_user.uuid, username: nil)
+ update!(redirect_to_user_uuid: new_user.uuid, username: nil)
end
skip_check_permissions_against_full_refresh do
- update_permissions self.uuid, self.uuid, CAN_MANAGE_PERM
- update_permissions new_user.uuid, new_user.uuid, CAN_MANAGE_PERM
- update_permissions new_user.owner_uuid, new_user.uuid, CAN_MANAGE_PERM
+ update_permissions self.uuid, self.uuid, CAN_MANAGE_PERM, nil, true
+ update_permissions new_user.uuid, new_user.uuid, CAN_MANAGE_PERM, nil, true
+ update_permissions new_user.owner_uuid, new_user.uuid, CAN_MANAGE_PERM, nil, true
end
- update_permissions self.owner_uuid, self.uuid, CAN_MANAGE_PERM
+ update_permissions self.owner_uuid, self.uuid, CAN_MANAGE_PERM, nil, true
end
end
@@ -599,6 +607,151 @@ SELECT target_uuid, perm_level
primary_user
end
+ def self.update_remote_user remote_user
+ remote_user = remote_user.symbolize_keys
+ remote_user_prefix = remote_user[:uuid][0..4]
+
+ # interaction between is_invited and is_active
+ #
+  # either flag can be nil, true or false
+ #
+ # in all cases, we create the user if they don't exist.
+ #
+ # invited nil, active nil: don't call setup or unsetup.
+ #
+ # invited nil, active false: call unsetup
+ #
+ # invited nil, active true: call setup and activate them.
+ #
+ #
+ # invited false, active nil: call unsetup
+ #
+ # invited false, active false: call unsetup
+ #
+ # invited false, active true: call unsetup
+ #
+ #
+ # invited true, active nil: call setup but don't change is_active
+ #
+ # invited true, active false: call setup but don't change is_active
+ #
+ # invited true, active true: call setup and activate them.
+
+ should_setup = (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.AutoSetupNewUsers or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+
+ should_activate = (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+
+ remote_should_be_unsetup = (remote_user[:is_invited] == nil && remote_user[:is_active] == false) ||
+ (remote_user[:is_invited] == false)
+
+ remote_should_be_setup = should_setup && (
+ (remote_user[:is_invited] == nil && remote_user[:is_active] == true) ||
+ (remote_user[:is_invited] == false && remote_user[:is_active] == true) ||
+ (remote_user[:is_invited] == true))
+
+ remote_should_be_active = should_activate && remote_user[:is_invited] != false && remote_user[:is_active] == true
+
+ # Make sure blank username is nil
+ remote_user[:username] = nil if remote_user[:username] == ""
+
+ begin
+ user = User.create_with(email: remote_user[:email],
+ username: remote_user[:username],
+ first_name: remote_user[:first_name],
+ last_name: remote_user[:last_name],
+ is_active: remote_should_be_active,
+ ).find_or_create_by(uuid: remote_user[:uuid])
+ rescue ActiveRecord::RecordNotUnique
+ retry
+ end
+
+ user.with_lock do
+ needupdate = {}
+ [:email, :username, :first_name, :last_name, :prefs].each do |k|
+ v = remote_user[k]
+ if !v.nil? && user.send(k) != v
+ needupdate[k] = v
+ end
+ end
+
+ user.email = needupdate[:email] if needupdate[:email]
+
+ loginCluster = Rails.configuration.Login.LoginCluster
+ if user.username.nil? || user.username == ""
+ # Don't have a username yet, try to set one
+ initial_username = user.set_initial_username(requested: remote_user[:username])
+ needupdate[:username] = initial_username if !initial_username.nil?
+ elsif remote_user_prefix != loginCluster
+ # Upstream is not login cluster, don't try to change the
+ # username once set.
+ needupdate.delete :username
+ end
+
+ if needupdate.length > 0
+ begin
+ user.update!(needupdate)
+ rescue ActiveRecord::RecordInvalid
+ if remote_user_prefix == loginCluster && !needupdate[:username].nil?
+ local_user = User.find_by_username(needupdate[:username])
+ # The username of this record conflicts with an existing,
+ # different user record. This can happen because the
+ # username changed upstream on the login cluster, or
+ # because we're federated with another cluster with a user
+ # by the same username. The login cluster is the source
+ # of truth, so change the username on the conflicting
+ # record and retry the update operation.
+ if local_user.uuid != user.uuid
+ new_username = "#{needupdate[:username]}#{rand(99999999)}"
+ Rails.logger.warn("cached username '#{needupdate[:username]}' collision with user '#{local_user.uuid}' - renaming to '#{new_username}' before retrying")
+ local_user.update!({username: new_username})
+ retry
+ end
+ end
+ raise # Not the issue we're handling above
+ end
+ elsif user.new_record?
+ begin
+ user.save!
+ rescue => e
+ Rails.logger.debug "Error saving user record: #{$!}"
+ Rails.logger.debug "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+ raise
+ end
+ end
+
+ if remote_should_be_unsetup
+      # Remote user is not in "invited" or "active" state on their home
+ # cluster, so they should be unsetup, which also makes them
+ # inactive.
+ user.unsetup
+ else
+ if !user.is_invited && remote_should_be_setup
+ user.setup
+ end
+
+ if !user.is_active && remote_should_be_active
+ # remote user is active and invited, we need to activate them
+ user.update!(is_active: true)
+ end
+
+ if remote_user_prefix == Rails.configuration.Login.LoginCluster and
+ user.is_active and
+ !remote_user[:is_admin].nil? and
+ user.is_admin != remote_user[:is_admin]
+ # Remote cluster controls our user database, including the
+ # admin flag.
+ user.update!(is_admin: remote_user[:is_admin])
+ end
+ end
+ end
+ user
+ end
+
protected
def self.attributes_required_columns
@@ -785,11 +938,11 @@ SELECT target_uuid, perm_level
resp = [Link.where(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
link_class: 'permission',
- name: 'can_read').first ||
+ name: 'can_write').first ||
Link.create(tail_uuid: self.uuid,
head_uuid: all_users_group_uuid,
link_class: 'permission',
- name: 'can_read')]
+ name: 'can_write')]
if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
resp += [Link.where(tail_uuid: all_users_group_uuid,
head_uuid: self.uuid,
@@ -817,8 +970,9 @@ SELECT target_uuid, perm_level
# Send admin notifications
def send_admin_notifications
- AdminNotifier.new_user(self).deliver_now
- if not self.is_active then
+ if self.is_invited then
+ AdminNotifier.new_user(self).deliver_now
+ else
AdminNotifier.new_inactive_user(self).deliver_now
end
end
diff --git a/services/api/app/models/virtual_machine.rb b/services/api/app/models/virtual_machine.rb
index 0b3557eef6..09687385ca 100644
--- a/services/api/app/models/virtual_machine.rb
+++ b/services/api/app/models/virtual_machine.rb
@@ -9,9 +9,9 @@ class VirtualMachine < ArvadosModel
has_many(:login_permissions,
-> { where("link_class = 'permission' and name = 'can_login'") },
- foreign_key: :head_uuid,
+ foreign_key: 'head_uuid',
class_name: 'Link',
- primary_key: :uuid)
+ primary_key: 'uuid')
api_accessible :user, extend: :common do |t|
t.add :hostname
diff --git a/services/api/app/models/workflow.rb b/services/api/app/models/workflow.rb
index 94890c6632..0268c4e979 100644
--- a/services/api/app/models/workflow.rb
+++ b/services/api/app/models/workflow.rb
@@ -18,7 +18,7 @@ class Workflow < ArvadosModel
def validate_definition
begin
- @definition_yaml = YAML.load self.definition if !definition.nil?
+ @definition_yaml = YAML.safe_load self.definition if !definition.nil?
rescue => e
errors.add :definition, "is not valid yaml: #{e.message}"
end
@@ -27,7 +27,7 @@ class Workflow < ArvadosModel
def set_name_and_description
old_wf = {}
begin
- old_wf = YAML.load self.definition_was if !self.definition_was.nil?
+ old_wf = YAML.safe_load self.definition_was if !self.definition_was.nil?
rescue => e
logger.warn "set_name_and_description error: #{e.message}"
return
diff --git a/services/api/app/views/admin_notifier/new_inactive_user.text.erb b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
index afcf34da71..22298b1ce7 100644
--- a/services/api/app/views/admin_notifier/new_inactive_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
@@ -2,15 +2,16 @@
SPDX-License-Identifier: AGPL-3.0 %>
+A new user has been created, but not set up.
-A new user landed on the inactive user page:
+ <%= @user.full_name %> <<%= @user.email %>> (<%= @user.username %>)
- <%= @user.full_name %> <<%= @user.email %>>
+They will not be able to use Arvados unless set up by an admin.
<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
-Please see workbench for more information:
+Please see Workbench for more information:
- <%= Rails.configuration.Services.Workbench1.ExternalURL %>
+ <%= URI::join(Rails.configuration.Services.Workbench1.ExternalURL, "user/#{@user.uuid}") %>
<% end -%>
Thanks,
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
index 670b84b7c1..920906d833 100644
--- a/services/api/app/views/admin_notifier/new_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_user.text.erb
@@ -2,22 +2,16 @@
SPDX-License-Identifier: AGPL-3.0 %>
-<%
- add_to_message = ''
- if Rails.configuration.Users.AutoSetupNewUsers
- add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
- end
-%>
-A new user has been created<%=add_to_message%>:
+A new user has been created and set up.
- <%= @user.full_name %> <<%= @user.email %>>
+ <%= @user.full_name %> <<%= @user.email %>> (<%= @user.username %>)
-This user is <%= @user.is_active ? '' : 'NOT ' %>active.
+They are able to use Arvados.
<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
-Please see workbench for more information:
+Please see Workbench for more information:
- <%= Rails.configuration.Services.Workbench1.ExternalURL %>
+ <%= URI::join(Rails.configuration.Services.Workbench1.ExternalURL, "user/#{@user.uuid}") %>
<% end -%>
Thanks,
diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb
index 352ee7754e..3f04db8517 100644
--- a/services/api/app/views/user_notifier/account_is_setup.text.erb
+++ b/services/api/app/views/user_notifier/account_is_setup.text.erb
@@ -2,4 +2,4 @@
SPDX-License-Identifier: AGPL-3.0 %>
-<%= ERB.new(Rails.configuration.Users.UserSetupMailText, 0, "-").result(binding) %>
+<%= ERB.new(Rails.configuration.Users.UserSetupMailText, trim_mode: "-").result(binding) %>
diff --git a/services/api/bin/rails b/services/api/bin/rails
index 5f594d1186..efc0377492 100755
--- a/services/api/bin/rails
+++ b/services/api/bin/rails
@@ -1,9 +1,4 @@
#!/usr/bin/env ruby
-
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-APP_PATH = File.expand_path('../config/application', __dir__)
-require_relative '../config/boot'
-require 'rails/commands'
+APP_PATH = File.expand_path("../config/application", __dir__)
+require_relative "../config/boot"
+require "rails/commands"
diff --git a/services/api/bin/rake b/services/api/bin/rake
index 87484df469..4fbf10b960 100755
--- a/services/api/bin/rake
+++ b/services/api/bin/rake
@@ -1,9 +1,4 @@
#!/usr/bin/env ruby
-
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require_relative '../config/boot'
-require 'rake'
+require_relative "../config/boot"
+require "rake"
Rake.application.run
diff --git a/services/api/bin/setup b/services/api/bin/setup
index c9142b942e..ec47b79b3b 100755
--- a/services/api/bin/setup
+++ b/services/api/bin/setup
@@ -1,38 +1,33 @@
#!/usr/bin/env ruby
-
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'fileutils'
-include FileUtils
+require "fileutils"
# path to your application root.
-APP_ROOT = File.expand_path('..', __dir__)
+APP_ROOT = File.expand_path("..", __dir__)
def system!(*args)
system(*args) || abort("\n== Command #{args} failed ==")
end
-chdir APP_ROOT do
- # This script is a starting point to setup your application.
+FileUtils.chdir APP_ROOT do
+ # This script is a way to set up or update your development environment automatically.
+ # This script is idempotent, so that you can run it at any time and get an expectable outcome.
# Add necessary setup steps to this file.
- puts '== Installing dependencies =='
- system! 'gem install bundler --conservative'
- system('bundle check') || system!('bundle install')
+ puts "== Installing dependencies =="
+ system! "gem install bundler --conservative"
+ system("bundle check") || system!("bundle install")
# puts "\n== Copying sample files =="
- # unless File.exist?('config/database.yml')
- # cp 'config/database.yml.sample', 'config/database.yml'
+ # unless File.exist?("config/database.yml")
+ # FileUtils.cp "config/database.yml.sample", "config/database.yml"
# end
puts "\n== Preparing database =="
- system! 'bin/rails db:setup'
+ system! "bin/rails db:prepare"
puts "\n== Removing old logs and tempfiles =="
- system! 'bin/rails log:clear tmp:clear'
+ system! "bin/rails log:clear tmp:clear"
puts "\n== Restarting application server =="
- system! 'bin/rails restart'
+ system! "bin/rails restart"
end
diff --git a/services/api/config.ru b/services/api/config.ru
index 30e8281843..4a3c09a688 100644
--- a/services/api/config.ru
+++ b/services/api/config.ru
@@ -1,8 +1,6 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# This file is used by Rack-based servers to start the application.
-require ::File.expand_path('../config/environment', __FILE__)
-run Server::Application
+require_relative "config/environment"
+
+run Rails.application
+Rails.application.load_server
diff --git a/services/api/config/application.rb b/services/api/config/application.rb
index b28ae0e071..716383f203 100644
--- a/services/api/config/application.rb
+++ b/services/api/config/application.rb
@@ -2,44 +2,26 @@
#
# SPDX-License-Identifier: AGPL-3.0
-require_relative 'boot'
+require_relative "boot"
require "rails"
-# Pick only the frameworks we need:
+# Pick the frameworks you want:
require "active_model/railtie"
require "active_job/railtie"
require "active_record/railtie"
+# require "active_storage/engine"
require "action_controller/railtie"
require "action_mailer/railtie"
+# require "action_mailbox/engine"
+# require "action_text/engine"
require "action_view/railtie"
+# require "action_cable/engine"
require "sprockets/railtie"
require "rails/test_unit/railtie"
-# Skipping the following:
-# * ActionCable (new in Rails 5.0) as it adds '/cable' routes that we're not using
-# * ActiveStorage (new in Rails 5.1)
-require 'digest'
-
-module Kernel
- def suppress_warnings
- verbose_orig = $VERBOSE
- begin
- $VERBOSE = nil
- yield
- ensure
- $VERBOSE = verbose_orig
- end
- end
-end
-
-if defined?(Bundler)
- suppress_warnings do
- # If you precompile assets before deploying to production, use this line
- Bundler.require(*Rails.groups(:assets => %w(development test)))
- # If you want your assets lazily compiled in production, use this line
- # Bundler.require(:default, :assets, Rails.env)
- end
-end
+# Require the gems listed in Gemfile, including any gems
+# you've limited to :test, :development, or :production.
+Bundler.require(*Rails.groups)
if ENV["ARVADOS_RAILS_LOG_TO_STDOUT"]
Rails.logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT))
@@ -47,38 +29,29 @@ end
module Server
class Application < Rails::Application
- # The following is to avoid SafeYAML's warning message
- SafeYAML::OPTIONS[:default_mode] = :safe
require_relative "arvados_config.rb"
- # Settings in config/environments/* take precedence over those specified here.
- # Application configuration should go into files in config/initializers
- # -- all .rb files in that directory are automatically loaded.
-
- # Custom directories with classes and modules you want to be autoloadable.
- # config.autoload_paths += %W(#{config.root}/extras)
+ # Initialize configuration defaults for specified Rails version.
+ config.load_defaults 7.0
- # Only load the plugins named here, in the order given (default is alphabetical).
- # :all can be used as a placeholder for all plugins not explicitly named.
- # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
+ # Configuration for the application, engines, and railties goes here.
+ #
+ # These settings can be overridden in specific environments using the files
+ # in config/environments, which are processed later.
+ #
+ # config.time_zone = "Central Time (US & Canada)"
+ # config.eager_load_paths << Rails.root.join("extras")
- # Activate observers that should always be running.
- # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
+ # We use db/structure.sql instead of db/schema.rb.
config.active_record.schema_format = :sql
- # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
- # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
- # config.i18n.default_locale = :de
-
- # Configure sensitive parameters which will be filtered from the log file.
- config.filter_parameters += [:password]
-
- # Load entire application at startup.
config.eager_load = true
config.active_support.test_order = :sorted
+ # container_request records can contain arbitrary data structures
+ # in mounts.*.content, so rails must not munge them.
config.action_dispatch.perform_deep_munge = false
# force_ssl's redirect-to-https feature doesn't work when the
@@ -86,7 +59,10 @@ module Server
# from connecting to Rails internally via plain http.
config.ssl_options = {redirect: false}
- I18n.enforce_available_locales = false
+ # This will change to 7.0 in a future release when there is no
+ # longer a possibility of rolling back to Arvados 2.7 (Rails 5.2)
+ # which cannot read 7.0-format cache files.
+ config.active_support.cache_format_version = 6.1
# Before using the filesystem backend for Rails.cache, check
# whether we own the relevant directory. If we don't, using it is
diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb
index c0f7ee174f..f8b9ff8ecd 100644
--- a/services/api/config/arvados_config.rb
+++ b/services/api/config/arvados_config.rb
@@ -36,7 +36,7 @@ if !status.success?
puts stderr
raise "error loading config: #{status}"
end
-confs = YAML.load(defaultYAML, deserialize_symbols: false)
+confs = YAML.safe_load(defaultYAML)
clusterID, clusterConfig = confs["Clusters"].first
$arvados_config_defaults = clusterConfig
$arvados_config_defaults["ClusterID"] = clusterID
@@ -50,7 +50,7 @@ if ENV["ARVADOS_CONFIG"] == "none"
else
# Load the global config file
Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
- confs = YAML.load(stdout, deserialize_symbols: false)
+ confs = YAML.safe_load(stdout)
if confs && !confs.empty?
# config-dump merges defaults with user configuration, so every
# key should be set.
@@ -106,6 +106,7 @@ arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_emai
arvcfg.declare_config "Users.UserNotifierEmailBcc", Hash
arvcfg.declare_config "Users.NewUserNotificationRecipients", Hash, :new_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, "Users.NewUserNotificationRecipients", v }
arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Hash, :new_inactive_user_notification_recipients, method(:arrayToHash)
+arvcfg.declare_config "Users.CanCreateRoleGroups", Boolean
arvcfg.declare_config "Users.RoleGroupsVisibleToAll", Boolean
arvcfg.declare_config "Login.LoginCluster", String
arvcfg.declare_config "Login.TrustedClients", Hash
@@ -131,7 +132,6 @@ arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_defa
arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
arvcfg.declare_config "Containers.AlwaysUsePreemptibleInstances", Boolean, :preemptible_instances
-arvcfg.declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
@@ -198,7 +198,7 @@ application_config = {}
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
confs = ConfigLoader.load(path, erb: true)
# Ignore empty YAML file:
- next if confs == false
+ next if confs == nil
application_config.deep_merge!(confs['common'] || {})
application_config.deep_merge!(confs[::Rails.env.to_s] || {})
end
diff --git a/services/api/config/boot.rb b/services/api/config/boot.rb
index 8087911837..282011619d 100644
--- a/services/api/config/boot.rb
+++ b/services/api/config/boot.rb
@@ -1,8 +1,3 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
+ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__)
-# Set up gems listed in the Gemfile.
-ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)
-
-require 'bundler/setup' # Set up gems listed in the Gemfile.
+require "bundler/setup" # Set up gems listed in the Gemfile.
diff --git a/services/api/config/environment.rb b/services/api/config/environment.rb
index cd706940a3..cac5315775 100644
--- a/services/api/config/environment.rb
+++ b/services/api/config/environment.rb
@@ -1,9 +1,5 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
+# Load the Rails application.
+require_relative "application"
-# Load the rails application
-require_relative 'application'
-
-# Initialize the rails application
+# Initialize the Rails application.
Rails.application.initialize!
diff --git a/services/api/config/initializers/application_controller_renderer.rb b/services/api/config/initializers/application_controller_renderer.rb
index 525d6adf95..89d2efab2b 100644
--- a/services/api/config/initializers/application_controller_renderer.rb
+++ b/services/api/config/initializers/application_controller_renderer.rb
@@ -1,7 +1,3 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
# ActiveSupport::Reloader.to_prepare do
diff --git a/services/api/config/initializers/assets.rb b/services/api/config/initializers/assets.rb
index f02c87b731..2eeef966fe 100644
--- a/services/api/config/initializers/assets.rb
+++ b/services/api/config/initializers/assets.rb
@@ -1,15 +1,12 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
# Version of your assets, change this if you want to expire all your assets.
-Rails.application.config.assets.version = '1.0'
+Rails.application.config.assets.version = "1.0"
-# Add additional assets to the asset load path
+# Add additional assets to the asset load path.
# Rails.application.config.assets.paths << Emoji.images_path
# Precompile additional assets.
-# application.js, application.css, and all non-JS/CSS in app/assets folder are already added.
-# Rails.application.config.assets.precompile += %w( search.js )
+# application.js, application.css, and all non-JS/CSS in the app/assets
+# folder are already added.
+# Rails.application.config.assets.precompile += %w( admin.js admin.css )
diff --git a/services/api/config/initializers/authorization.rb b/services/api/config/initializers/authorization.rb
index ec80048c8f..71d5557445 100644
--- a/services/api/config/initializers/authorization.rb
+++ b/services/api/config/initializers/authorization.rb
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
+require_relative "../../app/middlewares/arvados_api_token"
+
Server::Application.configure do
config.middleware.delete ActionDispatch::RemoteIp
config.middleware.insert 0, ActionDispatch::RemoteIp
diff --git a/services/api/config/initializers/backtrace_silencers.rb b/services/api/config/initializers/backtrace_silencers.rb
index b9c6bceef5..33699c3091 100644
--- a/services/api/config/initializers/backtrace_silencers.rb
+++ b/services/api/config/initializers/backtrace_silencers.rb
@@ -1,11 +1,8 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
-# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
+# Rails.backtrace_cleaner.add_silencer { |line| /my_noisy_library/.match?(line) }
-# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
-# Rails.backtrace_cleaner.remove_silencers!
+# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code
+# by setting BACKTRACE=1 before calling your invocation, like "BACKTRACE=1 ./bin/rails runner 'MyClass.perform'".
+Rails.backtrace_cleaner.remove_silencers! if ENV["BACKTRACE"]
diff --git a/services/api/config/initializers/clear_empty_content_type.rb b/services/api/config/initializers/clear_empty_content_type.rb
new file mode 100644
index 0000000000..3e501be212
--- /dev/null
+++ b/services/api/config/initializers/clear_empty_content_type.rb
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Rails handler stack crashes if the request Content-Type header value
+# is "", which is sometimes the case in GET requests from
+# ruby-google-api-client (which have no body content anyway).
+#
+# This middleware deletes such headers, so a request with an empty
+# Content-Type value is equivalent to a missing Content-Type header.
+class ClearEmptyContentType
+ def initialize(app=nil, options=nil)
+ @app = app
+ end
+
+ def call(env)
+ if env["CONTENT_TYPE"] == ""
+ env.delete("CONTENT_TYPE")
+ end
+ @app.call(env) if @app.respond_to?(:call)
+ end
+end
+
+Server::Application.configure do
+ config.middleware.use ClearEmptyContentType
+end
diff --git a/services/api/config/initializers/content_security_policy.rb b/services/api/config/initializers/content_security_policy.rb
index 853ecdeec4..54f47cf15f 100644
--- a/services/api/config/initializers/content_security_policy.rb
+++ b/services/api/config/initializers/content_security_policy.rb
@@ -1,29 +1,25 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
-# Define an application-wide content security policy
-# For further information see the following documentation
-# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
+# Define an application-wide content security policy.
+# See the Securing Rails Applications Guide for more information:
+# https://guides.rubyonrails.org/security.html#content-security-policy-header
-# Rails.application.config.content_security_policy do |policy|
-# policy.default_src :self, :https
-# policy.font_src :self, :https, :data
-# policy.img_src :self, :https, :data
-# policy.object_src :none
-# policy.script_src :self, :https
-# policy.style_src :self, :https
-
-# # Specify URI for violation reports
-# # policy.report_uri "/csp-violation-report-endpoint"
+# Rails.application.configure do
+# config.content_security_policy do |policy|
+# policy.default_src :self, :https
+# policy.font_src :self, :https, :data
+# policy.img_src :self, :https, :data
+# policy.object_src :none
+# policy.script_src :self, :https
+# policy.style_src :self, :https
+# # Specify URI for violation reports
+# # policy.report_uri "/csp-violation-report-endpoint"
+# end
+#
+# # Generate session nonces for permitted importmap and inline scripts
+# config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s }
+# config.content_security_policy_nonce_directives = %w(script-src)
+#
+# # Report violations without enforcing the policy.
+# # config.content_security_policy_report_only = true
# end
-
-# If you are using UJS then enable automatic nonce generation
-# Rails.application.config.content_security_policy_nonce_generator = -> request { SecureRandom.base64(16) }
-
-# Report CSP violations to a specified URI
-# For further information see the following documentation:
-# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy-Report-Only
-# Rails.application.config.content_security_policy_report_only = true
diff --git a/services/api/config/initializers/cookies_serializer.rb b/services/api/config/initializers/cookies_serializer.rb
index 5409f55c0b..5a6a32d371 100644
--- a/services/api/config/initializers/cookies_serializer.rb
+++ b/services/api/config/initializers/cookies_serializer.rb
@@ -1,9 +1,5 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
# Specify a serializer for the signed and encrypted cookie jars.
# Valid options are :json, :marshal, and :hybrid.
-Rails.application.config.action_dispatch.cookies_serializer = :marshal
+Rails.application.config.action_dispatch.cookies_serializer = :json
diff --git a/services/api/config/initializers/custom_types.rb b/services/api/config/initializers/custom_types.rb
index aecd4cfd4b..9d909e6cbb 100644
--- a/services/api/config/initializers/custom_types.rb
+++ b/services/api/config/initializers/custom_types.rb
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
+require_relative "../../app/models/jsonb_type"
+
# JSONB backed Hash & Array types that default to their empty versions when
# reading NULL from the database, or get nil passed by parameter.
ActiveRecord::Type.register(:jsonbHash, JsonbType::Hash)
diff --git a/services/api/config/initializers/eventbus.rb b/services/api/config/initializers/eventbus.rb
deleted file mode 100644
index eb5561a47f..0000000000
--- a/services/api/config/initializers/eventbus.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-if ENV['ARVADOS_WEBSOCKETS']
- Server::Application.configure do
- Rails.logger.error "Built-in websocket server is disabled. See note (2017-03-23, e8cc0d7) at https://dev.arvados.org/projects/arvados/wiki/Upgrading_to_master"
-
- class EventBusRemoved
- def overloaded?
- false
- end
- def on_connect ws
- ws.on :open do |e|
- EM::Timer.new 1 do
- ws.send(SafeJSON.dump({status: 501, message: "Server misconfigured? see http://doc.arvados.org/install/install-ws.html"}))
- end
- EM::Timer.new 3 do
- ws.close
- end
- end
- end
- end
-
- config.middleware.insert_after(ArvadosApiToken, RackSocket, {
- handler: EventBusRemoved,
- mount: "/websocket",
- websocket_only: (ENV['ARVADOS_WEBSOCKETS'] == "ws-only")
- })
- end
-end
diff --git a/services/api/config/initializers/filter_parameter_logging.rb b/services/api/config/initializers/filter_parameter_logging.rb
index f26d0ad223..adc6568ce8 100644
--- a/services/api/config/initializers/filter_parameter_logging.rb
+++ b/services/api/config/initializers/filter_parameter_logging.rb
@@ -1,8 +1,8 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
-# Configure sensitive parameters which will be filtered from the log file.
-Rails.application.config.filter_parameters += [:password]
+# Configure parameters to be filtered from the log file. Use this to limit dissemination of
+# sensitive information. See the ActiveSupport::ParameterFilter documentation for supported
+# notations and behaviors.
+Rails.application.config.filter_parameters += [
+ :passw, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn
+]
diff --git a/services/api/config/initializers/inflections.rb b/services/api/config/initializers/inflections.rb
index 50bd0d5f55..bd92f2fd76 100644
--- a/services/api/config/initializers/inflections.rb
+++ b/services/api/config/initializers/inflections.rb
@@ -4,15 +4,21 @@
# Be sure to restart your server when you modify this file.
-# Add new inflection rules using the following format
-# (all these examples are active by default):
-# ActiveSupport::Inflector.inflections do |inflect|
-# inflect.plural /^(ox)$/i, '\1en'
-# inflect.singular /^(ox)en/i, '\1'
-# inflect.irregular 'person', 'people'
+# Add new inflection rules using the following format. Inflections
+# are locale specific, and you may define rules for as many different
+# locales as you wish. All of these examples are active by default:
+# ActiveSupport::Inflector.inflections(:en) do |inflect|
+# inflect.plural /^(ox)$/i, "\\1en"
+# inflect.singular /^(ox)en/i, "\\1"
+# inflect.irregular "person", "people"
# inflect.uncountable %w( fish sheep )
# end
+# These inflection rules are supported but not enabled by default:
+# ActiveSupport::Inflector.inflections(:en) do |inflect|
+# inflect.acronym "RESTful"
+# end
+
ActiveSupport::Inflector.inflections do |inflect|
inflect.plural(/^([Ss]pecimen)$/i, '\1s')
inflect.singular(/^([Ss]pecimen)s?/i, '\1')
diff --git a/services/api/config/initializers/mime_types.rb b/services/api/config/initializers/mime_types.rb
index 36683cc246..dc1899682b 100644
--- a/services/api/config/initializers/mime_types.rb
+++ b/services/api/config/initializers/mime_types.rb
@@ -1,9 +1,4 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
# Add new mime types for use in respond_to blocks:
# Mime::Type.register "text/richtext", :rtf
-# Mime::Type.register_alias "text/html", :iphone
diff --git a/services/api/config/initializers/new_framework_defaults.rb b/services/api/config/initializers/new_framework_defaults.rb
deleted file mode 100644
index 2e2f0b1810..0000000000
--- a/services/api/config/initializers/new_framework_defaults.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Be sure to restart your server when you modify this file.
-#
-# This file contains migration options to ease your Rails 5.0 upgrade.
-#
-# Once upgraded flip defaults one by one to migrate to the new default.
-#
-# Read the Guide for Upgrading Ruby on Rails for more info on each option.
-
-Rails.application.config.action_controller.raise_on_unfiltered_parameters = true
-
-# Enable per-form CSRF tokens. Previous versions had false.
-Rails.application.config.action_controller.per_form_csrf_tokens = false
-
-# Enable origin-checking CSRF mitigation. Previous versions had false.
-Rails.application.config.action_controller.forgery_protection_origin_check = false
-
-# Make Ruby 2.4 preserve the timezone of the receiver when calling `to_time`.
-# Previous versions had false.
-ActiveSupport.to_time_preserves_timezone = false
-
-# Require `belongs_to` associations by default. Previous versions had false.
-Rails.application.config.active_record.belongs_to_required_by_default = false
diff --git a/services/api/config/initializers/new_framework_defaults_5_2.rb b/services/api/config/initializers/new_framework_defaults_5_2.rb
deleted file mode 100644
index 93a8d52406..0000000000
--- a/services/api/config/initializers/new_framework_defaults_5_2.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Be sure to restart your server when you modify this file.
-#
-# This file contains migration options to ease your Rails 5.2 upgrade.
-#
-# Once upgraded flip defaults one by one to migrate to the new default.
-#
-# Read the Guide for Upgrading Ruby on Rails for more info on each option.
-
-# Make Active Record use stable #cache_key alongside new #cache_version method.
-# This is needed for recyclable cache keys.
-# Rails.application.config.active_record.cache_versioning = true
-
-# Use AES-256-GCM authenticated encryption for encrypted cookies.
-# Also, embed cookie expiry in signed or encrypted cookies for increased security.
-#
-# This option is not backwards compatible with earlier Rails versions.
-# It's best enabled when your entire app is migrated and stable on 5.2.
-#
-# Existing cookies will be converted on read then written with the new scheme.
-# Rails.application.config.action_dispatch.use_authenticated_cookie_encryption = true
-
-# Use AES-256-GCM authenticated encryption as default cipher for encrypting messages
-# instead of AES-256-CBC, when use_authenticated_message_encryption is set to true.
-# Rails.application.config.active_support.use_authenticated_message_encryption = true
-
-# Add default protection from forgery to ActionController::Base instead of in
-# ApplicationController.
-# Rails.application.config.action_controller.default_protect_from_forgery = true
-
-# Store boolean values are in sqlite3 databases as 1 and 0 instead of 't' and
-# 'f' after migrating old data.
-# Rails.application.config.active_record.sqlite3.represent_boolean_as_integer = true
-
-# Use SHA-1 instead of MD5 to generate non-sensitive digests, such as the ETag header.
-# Rails.application.config.active_support.use_sha1_digests = true
-
-# Make `form_with` generate id attributes for any generated HTML tags.
-# Rails.application.config.action_view.form_with_generates_ids = true
diff --git a/services/api/config/initializers/permissions_policy.rb b/services/api/config/initializers/permissions_policy.rb
new file mode 100644
index 0000000000..00f64d71b0
--- /dev/null
+++ b/services/api/config/initializers/permissions_policy.rb
@@ -0,0 +1,11 @@
+# Define an application-wide HTTP permissions policy. For further
+# information see https://developers.google.com/web/updates/2018/06/feature-policy
+#
+# Rails.application.config.permissions_policy do |f|
+# f.camera :none
+# f.gyroscope :none
+# f.microphone :none
+# f.usb :none
+# f.fullscreen :self
+# f.payment :self, "https://secure.example.com"
+# end
diff --git a/services/api/config/initializers/reload_config.rb b/services/api/config/initializers/reload_config.rb
index 1582855bfb..f6ef8af963 100644
--- a/services/api/config/initializers/reload_config.rb
+++ b/services/api/config/initializers/reload_config.rb
@@ -2,11 +2,7 @@
#
# SPDX-License-Identifier: AGPL-3.0
-if !File.owned?(Rails.root.join('tmp'))
- Rails.logger.debug("reload_config: not owner of #{Rails.root}/tmp, skipping")
-elsif ENV["ARVADOS_CONFIG"] == "none"
- Rails.logger.debug("reload_config: no config in use, skipping")
-else
+def start_reload_thread
Thread.new do
lockfile = Rails.root.join('tmp', 'reload_config.lock')
File.open(lockfile, File::WRONLY|File::CREAT, 0600) do |f|
@@ -30,18 +26,18 @@ else
# precision cannot represent multiple updates per second.
if t.to_f != t_lastload.to_f || Time.now.to_f - t.to_f < 5
Open3.popen2("arvados-server", "config-dump", "-skip-legacy") do |stdin, stdout, status_thread|
- confs = YAML.load(stdout, deserialize_symbols: false)
+ confs = YAML.safe_load(stdout)
hash = confs["SourceSHA256"]
rescue => e
Rails.logger.info("reload_config: config file updated but could not be loaded: #{e}")
t_lastload = t
- continue
+ next
end
if hash == hash_lastload
# If we reloaded a new or updated file, but the content is
# identical, keep polling instead of restarting.
t_lastload = t
- continue
+ next
end
restartfile = Rails.root.join('tmp', 'restart.txt')
@@ -64,3 +60,15 @@ else
end
end
end
+
+if !File.owned?(Rails.root.join('tmp'))
+ Rails.logger.debug("reload_config: not owner of #{Rails.root}/tmp, skipping")
+elsif ENV["ARVADOS_CONFIG"] == "none"
+ Rails.logger.debug("reload_config: no config in use, skipping")
+elsif defined?(PhusionPassenger)
+ PhusionPassenger.on_event(:starting_worker_process) do |forked|
+ start_reload_thread
+ end
+else
+ start_reload_thread
+end
diff --git a/services/api/config/initializers/request_id_middleware.rb b/services/api/config/initializers/request_id_middleware.rb
index e2158801e7..cfb018ca97 100644
--- a/services/api/config/initializers/request_id_middleware.rb
+++ b/services/api/config/initializers/request_id_middleware.rb
@@ -14,7 +14,7 @@ module CustomRequestId
end
def internal_request_id
- "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
+ "req-" + Random.new.rand(2**128).to_s(36)[0..19]
end
end
@@ -22,4 +22,4 @@ class ActionDispatch::RequestId
# Instead of using the default UUID-like format for X-Request-Id headers,
# use our own.
prepend CustomRequestId
-end
\ No newline at end of file
+end
diff --git a/services/api/config/initializers/schema_discovery_cache.rb b/services/api/config/initializers/schema_discovery_cache.rb
deleted file mode 100644
index c2cb8de081..0000000000
--- a/services/api/config/initializers/schema_discovery_cache.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Delete the cached discovery document during startup. Otherwise we
-# might still serve an old discovery document after updating the
-# schema and restarting the server.
-
-Rails.cache.delete 'arvados_v1_rest_discovery'
diff --git a/services/api/config/initializers/wrap_parameters.rb b/services/api/config/initializers/wrap_parameters.rb
index 6fb9786504..bbfc3961bf 100644
--- a/services/api/config/initializers/wrap_parameters.rb
+++ b/services/api/config/initializers/wrap_parameters.rb
@@ -1,9 +1,5 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
# Be sure to restart your server when you modify this file.
-#
+
# This file contains settings for ActionController::ParamsWrapper which
# is enabled by default.
@@ -12,7 +8,7 @@ ActiveSupport.on_load(:action_controller) do
wrap_parameters format: [:json]
end
-# Disable root element in JSON by default.
-ActiveSupport.on_load(:active_record) do
- self.include_root_in_json = false
-end
+# To enable root element in JSON for ActiveRecord objects.
+# ActiveSupport.on_load(:active_record) do
+# self.include_root_in_json = true
+# end
diff --git a/services/api/config/locales/en.yml b/services/api/config/locales/en.yml
index e6a62cb837..cf9b342d0a 100644
--- a/services/api/config/locales/en.yml
+++ b/services/api/config/locales/en.yml
@@ -1,9 +1,33 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
+# Files in the config/locales directory are used for internationalization
+# and are automatically loaded by Rails. If you want to use locales other
+# than English, add the necessary files in this directory.
#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Sample localization file for English. Add more files in this directory for other locales.
-# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+# To use the locales, use `I18n.t`:
+#
+# I18n.t 'hello'
+#
+# In views, this is aliased to just `t`:
+#
+# <%= t('hello') %>
+#
+# To use a different locale, set it with `I18n.locale`:
+#
+# I18n.locale = :es
+#
+# This would use the information in config/locales/es.yml.
+#
+# The following keys must be escaped otherwise they will not be retrieved by
+# the default I18n backend:
+#
+# true, false, on, off, yes, no
+#
+# Instead, surround them with single quotes.
+#
+# en:
+# 'true': 'foo'
+#
+# To learn more, please read the Rails Internationalization guide
+# available at https://guides.rubyonrails.org/i18n.html.
en:
hello: "Hello world"
diff --git a/services/api/config/routes.rb b/services/api/config/routes.rb
index 9c7bfc3a7a..b87e86f664 100644
--- a/services/api/config/routes.rb
+++ b/services/api/config/routes.rb
@@ -40,10 +40,13 @@ Rails.application.routes.draw do
get 'auth', on: :member
post 'lock', on: :member
post 'unlock', on: :member
+ post 'update_priority', on: :member
get 'secret_mounts', on: :member
get 'current', on: :collection
end
- resources :container_requests
+ resources :container_requests do
+ get 'container_status', on: :member
+ end
resources :jobs do
get 'queue', on: :collection
get 'queue_size', on: :collection
diff --git a/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb b/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb
index 049b5e2d63..2c1b406000 100644
--- a/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb
+++ b/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb
@@ -17,7 +17,7 @@ class RenameMetadataAttributes < ActiveRecord::Migration[4.2]
Metadatum.where('head like ?', 'orvos#%').each do |m|
kind_uuid = m.head.match /^(orvos\#.*)\#([-0-9a-z]+)$/
if kind_uuid
- m.update_attributes(head_kind: kind_uuid[1],
+ m.update(head_kind: kind_uuid[1],
head: kind_uuid[2])
end
end
@@ -28,7 +28,7 @@ class RenameMetadataAttributes < ActiveRecord::Migration[4.2]
def down
begin
Metadatum.where('head_kind is not null and head_kind <> ? and head is not null', '').each do |m|
- m.update_attributes(head: m.head_kind + '#' + m.head)
+ m.update(head: m.head_kind + '#' + m.head)
end
rescue
end
diff --git a/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb b/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb
index 71f769c157..0a05718fdd 100644
--- a/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb
+++ b/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb
@@ -6,13 +6,13 @@ class SetGroupClassOnAnonymousGroup < ActiveRecord::Migration[4.2]
include CurrentApiClient
def up
act_as_system_user do
- anonymous_group.update_attributes group_class: 'role', name: 'Anonymous users', description: 'Anonymous users'
+ anonymous_group.update group_class: 'role', name: 'Anonymous users', description: 'Anonymous users'
end
end
def down
act_as_system_user do
- anonymous_group.update_attributes group_class: nil, name: 'Anonymous group', description: 'Anonymous group'
+ anonymous_group.update group_class: nil, name: 'Anonymous group', description: 'Anonymous group'
end
end
end
diff --git a/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb b/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb
index 8814fc87d3..1d3a6ed1b4 100644
--- a/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb
+++ b/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb
@@ -107,7 +107,7 @@ class FixCollectionPortableDataHashWithHintedManifest < ActiveRecord::Migration[
attributes[:properties]["migrated_from"] ||= coll.uuid
coll_copy = Collection.create!(attributes)
Log.log_create(coll_copy)
- coll.update_attributes(portable_data_hash: stripped_pdh)
+ coll.update(portable_data_hash: stripped_pdh)
Log.log_update(coll, start_log)
end
end
diff --git a/services/api/db/migrate/20180917205609_recompute_file_names_index.rb b/services/api/db/migrate/20180917205609_recompute_file_names_index.rb
index b321422143..ed6be3bfe1 100644
--- a/services/api/db/migrate/20180917205609_recompute_file_names_index.rb
+++ b/services/api/db/migrate/20180917205609_recompute_file_names_index.rb
@@ -8,7 +8,7 @@ class RecomputeFileNamesIndex < ActiveRecord::Migration[4.2]
Collection.select(:portable_data_hash, :manifest_text).where(portable_data_hash: pdhs).distinct(:portable_data_hash).each do |c|
ActiveRecord::Base.connection.exec_query("update collections set file_names=$1 where portable_data_hash=$2",
"update file_names index",
- [[nil, c.manifest_files], [nil, c.portable_data_hash]])
+ [c.manifest_files, c.portable_data_hash])
end
ActiveRecord::Base.connection.exec_query('COMMIT')
end
diff --git a/services/api/db/migrate/20220726034131_write_via_all_users.rb b/services/api/db/migrate/20220726034131_write_via_all_users.rb
new file mode 100644
index 0000000000..f1280597f9
--- /dev/null
+++ b/services/api/db/migrate/20220726034131_write_via_all_users.rb
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WriteViaAllUsers < ActiveRecord::Migration[5.2]
+ include CurrentApiClient
+ def up
+ changelinks(from: "can_read", to: "can_write")
+ end
+ def down
+ changelinks(from: "can_write", to: "can_read")
+ end
+ def changelinks(from:, to:)
+ ActiveRecord::Base.connection.exec_query(
+ "update links set name=$1 where link_class=$2 and name=$3 and tail_uuid like $4 and head_uuid = $5",
+ "migrate", [
+ to,
+ "permission",
+ from,
+ "_____-tpzed-_______________",
+ all_users_group_uuid,
+ ])
+ end
+end
diff --git a/services/api/db/migrate/20220804133317_add_cost_to_containers.rb b/services/api/db/migrate/20220804133317_add_cost_to_containers.rb
new file mode 100644
index 0000000000..188187e394
--- /dev/null
+++ b/services/api/db/migrate/20220804133317_add_cost_to_containers.rb
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCostToContainers < ActiveRecord::Migration[5.2]
+ def change
+ add_column :containers, :cost, :float, null: false, default: 0
+ add_column :containers, :subrequests_cost, :float, null: false, default: 0
+ add_column :container_requests, :cumulative_cost, :float, null: false, default: 0
+ end
+end
diff --git a/services/api/db/migrate/20221219165512_dedup_permission_links.rb b/services/api/db/migrate/20221219165512_dedup_permission_links.rb
new file mode 100644
index 0000000000..6aef343f1c
--- /dev/null
+++ b/services/api/db/migrate/20221219165512_dedup_permission_links.rb
@@ -0,0 +1,46 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'update_permissions'
+
+class DedupPermissionLinks < ActiveRecord::Migration[5.2]
+ include CurrentApiClient
+ def up
+ act_as_system_user do
+ batch_update_permissions do
+ rows = ActiveRecord::Base.connection.select_all("SELECT MIN(uuid) AS uuid, COUNT(uuid) AS n FROM links
+ WHERE tail_uuid IS NOT NULL
+ AND head_uuid IS NOT NULL
+ AND link_class = 'permission'
+ AND name in ('can_read', 'can_write', 'can_manage')
+ GROUP BY (tail_uuid, head_uuid)
+ HAVING COUNT(uuid) > 1")
+ rows.each do |row|
+ Rails.logger.debug "DedupPermissionLinks: consolidating #{row['n']} links into #{row['uuid']}"
+ link = Link.find_by_uuid(row['uuid'])
+ # This no-op update has the side effect that the update hooks
+ # will merge the highest available permission into this one
+ # and then delete the others.
+ link.update!(properties: link.properties.dup)
+ end
+
+ rows = ActiveRecord::Base.connection.select_all("SELECT MIN(uuid) AS uuid, COUNT(uuid) AS n FROM links
+ WHERE tail_uuid IS NOT NULL
+ AND head_uuid IS NOT NULL
+ AND link_class = 'permission'
+ AND name = 'can_login'
+ GROUP BY (tail_uuid, head_uuid, properties)
+ HAVING COUNT(uuid) > 1")
+ rows.each do |row|
+ Rails.logger.debug "DedupPermissionLinks: consolidating #{row['n']} links into #{row['uuid']}"
+ link = Link.find_by_uuid(row['uuid'])
+ link.update!(properties: link.properties.dup)
+ end
+ end
+ end
+ end
+ def down
+ # no-op -- restoring redundant records would still be redundant
+ end
+end
diff --git a/services/api/db/migrate/20221230155924_bigint_id.rb b/services/api/db/migrate/20221230155924_bigint_id.rb
new file mode 100644
index 0000000000..932cb025dc
--- /dev/null
+++ b/services/api/db/migrate/20221230155924_bigint_id.rb
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class BigintId < ActiveRecord::Migration[5.2]
+ disable_ddl_transaction!
+ def up
+ old_value = query_value('SHOW statement_timeout')
+ execute "SET statement_timeout TO '0'"
+
+ change_column :api_client_authorizations, :id, :bigint
+ change_column :api_client_authorizations, :api_client_id, :bigint
+ change_column :api_client_authorizations, :user_id, :bigint
+ change_column :api_clients, :id, :bigint
+ change_column :authorized_keys, :id, :bigint
+ change_column :collections, :id, :bigint
+ change_column :container_requests, :id, :bigint
+ change_column :containers, :id, :bigint
+ change_column :groups, :id, :bigint
+ change_column :humans, :id, :bigint
+ change_column :job_tasks, :id, :bigint
+ change_column :jobs, :id, :bigint
+ change_column :keep_disks, :id, :bigint
+ change_column :keep_services, :id, :bigint
+ change_column :links, :id, :bigint
+ change_column :logs, :id, :bigint
+ change_column :nodes, :id, :bigint
+ change_column :users, :id, :bigint
+ change_column :pipeline_instances, :id, :bigint
+ change_column :pipeline_templates, :id, :bigint
+ change_column :repositories, :id, :bigint
+ change_column :specimens, :id, :bigint
+ change_column :traits, :id, :bigint
+ change_column :virtual_machines, :id, :bigint
+ change_column :workflows, :id, :bigint
+
+ execute "SET statement_timeout TO #{quote(old_value)}"
+ end
+
+ def down
+ end
+end
diff --git a/services/api/db/migrate/20230421142716_add_name_index_to_collections_and_groups.rb b/services/api/db/migrate/20230421142716_add_name_index_to_collections_and_groups.rb
new file mode 100644
index 0000000000..5fe450d05c
--- /dev/null
+++ b/services/api/db/migrate/20230421142716_add_name_index_to_collections_and_groups.rb
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddNameIndexToCollectionsAndGroups < ActiveRecord::Migration[5.2]
+ def up
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_groups_on_name on groups USING gin (name gin_trgm_ops)'
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_collections_on_name on collections USING gin (name gin_trgm_ops)'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_collections_on_name'
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_groups_on_name'
+ end
+end
diff --git a/services/api/db/migrate/20230503224107_priority_update_functions.rb b/services/api/db/migrate/20230503224107_priority_update_functions.rb
new file mode 100644
index 0000000000..3504a10691
--- /dev/null
+++ b/services/api/db/migrate/20230503224107_priority_update_functions.rb
@@ -0,0 +1,69 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PriorityUpdateFunctions < ActiveRecord::Migration[5.2]
+ def up
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint
+ LANGUAGE sql
+ AS $$
+/* Determine the priority of an individual container.
+   The "inherited" priority comes from the path we followed from the root; the parent container's
+   priority hasn't been updated in the table yet, but we need to behave as if it has been.
+*/
+select coalesce(max(case when containers.uuid = inherited_from then inherited
+ when containers.priority is not NULL then containers.priority
+ else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint
+ end), 0) from
+ container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid
+ where container_requests.container_uuid = for_container_uuid and container_requests.state = 'Committed' and container_requests.priority > 0;
+$$;
+}
+
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION container_tree_priorities(for_container_uuid character varying) returns table (pri_container_uuid character varying, upd_priority bigint)
+ LANGUAGE sql
+ AS $$
+/* Calculate the priorities of all containers starting from for_container_uuid.
+ This traverses the process tree downward and calls container_priority for each container
+ and returns a table of container uuids and their new priorities.
+*/
+with recursive tab(upd_container_uuid, upd_priority) as (
+ select for_container_uuid, container_priority(for_container_uuid, 0, '')
+union
+ select containers.uuid, container_priority(containers.uuid, child_requests.upd_priority, child_requests.upd_container_uuid)
+ from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests
+ join containers on child_requests.container_uuid = containers.uuid
+ where containers.state in ('Queued', 'Locked', 'Running')
+)
+select upd_container_uuid, upd_priority from tab;
+$$;
+}
+
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION container_tree(for_container_uuid character varying) returns table (pri_container_uuid character varying)
+ LANGUAGE sql
+ AS $$
+/* A lighter weight version of the update_priorities query that only returns the containers in a tree,
+ used by SELECT FOR UPDATE.
+*/
+with recursive tab(upd_container_uuid) as (
+ select for_container_uuid
+union
+ select containers.uuid
+ from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests
+ join containers on child_requests.container_uuid = containers.uuid
+ where containers.state in ('Queued', 'Locked', 'Running')
+)
+select upd_container_uuid from tab;
+$$;
+}
+ end
+
+ def down
+ ActiveRecord::Base.connection.execute "DROP FUNCTION container_priority"
+ ActiveRecord::Base.connection.execute "DROP FUNCTION container_tree_priorities"
+ ActiveRecord::Base.connection.execute "DROP FUNCTION container_tree"
+ end
+end
diff --git a/services/api/db/migrate/20230815160000_jsonb_exists_functions.rb b/services/api/db/migrate/20230815160000_jsonb_exists_functions.rb
new file mode 100644
index 0000000000..751babff1f
--- /dev/null
+++ b/services/api/db/migrate/20230815160000_jsonb_exists_functions.rb
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JsonbExistsFunctions < ActiveRecord::Migration[5.2]
+ def up
+
+ # Define functions for the "?" and "?&" operators. We can't use
+ # "?" and "?&" directly in ActiveRecord queries because "?" is
+ # used for parameter substitution.
+ #
+ # We used to use jsonb_exists() and jsonb_exists_all() but
+ # apparently Postgres associates indexes with operators but not
+ # with functions, so while a query using an operator can use the
+ # index, the equivalent clause using the function will always
+ # perform a full row scan.
+ #
+ # See ticket https://dev.arvados.org/issues/20858 for examples.
+ #
+ # As a workaround, we can define IMMUTABLE functions, which are
+ # directly inlined into the query, which then uses the index as
+ # intended.
+ #
+ # Huge shout out to this stack overflow post that explained what
+ # is going on and provides the workaround used here.
+ #
+ # https://dba.stackexchange.com/questions/90002/postgresql-operator-uses-index-but-underlying-function-does-not
+
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION jsonb_exists_inline_op(jsonb, text)
+RETURNS bool
+LANGUAGE sql
+IMMUTABLE
+AS $$SELECT $1 ? $2$$
+}
+
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION jsonb_exists_all_inline_op(jsonb, text[])
+RETURNS bool
+LANGUAGE sql
+IMMUTABLE
+AS 'SELECT $1 ?& $2'
+}
+ end
+
+ def down
+ ActiveRecord::Base.connection.execute "DROP FUNCTION jsonb_exists_inline_op"
+ ActiveRecord::Base.connection.execute "DROP FUNCTION jsonb_exists_all_inline_op"
+ end
+end
diff --git a/services/api/db/migrate/20230821000000_priority_update_fix.rb b/services/api/db/migrate/20230821000000_priority_update_fix.rb
new file mode 100644
index 0000000000..514f0d4e18
--- /dev/null
+++ b/services/api/db/migrate/20230821000000_priority_update_fix.rb
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PriorityUpdateFix < ActiveRecord::Migration[5.2]
+ def up
+ ActiveRecord::Base.connection.execute %{
+CREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint
+ LANGUAGE sql
+ AS $$
+/* Determine the priority of an individual container.
+   The "inherited" priority comes from the path we followed from the root; the parent container's
+   priority hasn't been updated in the table yet, but we need to behave as if it has been.
+*/
+select coalesce(max(case when containers.uuid = inherited_from then inherited
+ when containers.priority is not NULL then containers.priority
+ else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint
+ end), 0) from
+ container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid
+ where container_requests.container_uuid = for_container_uuid and
+ container_requests.state = 'Committed' and
+ container_requests.priority > 0 and
+ container_requests.owner_uuid not in (select group_uuid from trashed_groups);
+$$;
+}
+ end
+
+ def down
+ end
+end
diff --git a/services/api/db/migrate/20230922000000_add_btree_name_index_to_collections_and_groups.rb b/services/api/db/migrate/20230922000000_add_btree_name_index_to_collections_and_groups.rb
new file mode 100644
index 0000000000..7e6e725c9b
--- /dev/null
+++ b/services/api/db/migrate/20230922000000_add_btree_name_index_to_collections_and_groups.rb
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddBtreeNameIndexToCollectionsAndGroups < ActiveRecord::Migration[5.2]
+ #
+ # We previously added 'index_groups_on_name' and
+ # 'index_collections_on_name' but those are 'gin_trgm_ops' which is
+ # used with 'ilike' searches but despite documentation suggesting
+ # they would be, experience has shown these indexes don't get used
+ # for '=' (and/or they are much slower than the btree for exact
+ # matches).
+ #
+ # So we want to add a regular btree index.
+ #
+ def up
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_groups_on_name_btree on groups USING btree (name)'
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_collections_on_name_btree on collections USING btree (name)'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS index_collections_on_name_btree'
+ ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS index_groups_on_name_btree'
+ end
+end
diff --git a/services/api/db/migrate/20231013000000_compute_permission_index.rb b/services/api/db/migrate/20231013000000_compute_permission_index.rb
new file mode 100644
index 0000000000..ecd85ef1bc
--- /dev/null
+++ b/services/api/db/migrate/20231013000000_compute_permission_index.rb
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ComputePermissionIndex < ActiveRecord::Migration[5.2]
+ def up
+ # The inner part of compute_permission_subgraph has a query clause like this:
+ #
+ # where u.perm_origin_uuid = m.target_uuid AND m.traverse_owned
+ # AND (m.user_uuid = m.target_uuid or m.target_uuid not like '_____-tpzed-_______________')
+ #
+ # This will end up doing a sequential scan on
+ # materialized_permissions, which can easily have millions of
+ # rows, unless we fully index the table for this query. In one test,
+ # this brought the compute_permission_subgraph query from over 6
+ # seconds down to 250ms.
+ #
+ ActiveRecord::Base.connection.execute "drop index if exists index_materialized_permissions_target_is_not_user"
+ ActiveRecord::Base.connection.execute %{
+create index index_materialized_permissions_target_is_not_user on materialized_permissions (target_uuid, traverse_owned, (user_uuid = target_uuid or target_uuid not like '_____-tpzed-_______________'));
+}
+ end
+
+ def down
+ ActiveRecord::Base.connection.execute "drop index if exists index_materialized_permissions_target_is_not_user"
+ end
+end
diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql
index c5f6d567bf..c0d4263d97 100644
--- a/services/api/db/structure.sql
+++ b/services/api/db/structure.sql
@@ -62,10 +62,10 @@ with
permission (permission origin is self).
*/
perm_from_start(perm_origin_uuid, target_uuid, val, traverse_owned) as (
-
+
WITH RECURSIVE
traverse_graph(origin_uuid, target_uuid, val, traverse_owned, starting_set) as (
-
+
values (perm_origin_uuid, starting_uuid, starting_perm,
should_traverse_owned(starting_uuid, starting_perm),
(perm_origin_uuid = starting_uuid or starting_uuid not like '_____-tpzed-_______________'))
@@ -107,10 +107,10 @@ case (edges.edge_id = perm_edge_id)
can_manage permission granted by ownership.
*/
additional_perms(perm_origin_uuid, target_uuid, val, traverse_owned) as (
-
+
WITH RECURSIVE
traverse_graph(origin_uuid, target_uuid, val, traverse_owned, starting_set) as (
-
+
select edges.tail_uuid as origin_uuid, edges.head_uuid as target_uuid, edges.val,
should_traverse_owned(edges.head_uuid, edges.val),
edges.head_uuid like '_____-j7d0g-_______________'
@@ -190,6 +190,92 @@ case (edges.edge_id = perm_edge_id)
$$;
+--
+-- Name: container_priority(character varying, bigint, character varying); Type: FUNCTION; Schema: public; Owner: -
+--
+
+CREATE FUNCTION public.container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) RETURNS bigint
+ LANGUAGE sql
+ AS $$
+/* Determine the priority of an individual container.
+ The "inherited" priority comes from the path we followed from the root, the parent container
+ priority hasn't been updated in the table yet but we need to behave it like it has been.
+*/
+select coalesce(max(case when containers.uuid = inherited_from then inherited
+ when containers.priority is not NULL then containers.priority
+ else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint
+ end), 0) from
+ container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid
+ where container_requests.container_uuid = for_container_uuid and
+ container_requests.state = 'Committed' and
+ container_requests.priority > 0 and
+ container_requests.owner_uuid not in (select group_uuid from trashed_groups);
+$$;
+
+
+--
+-- Name: container_tree(character varying); Type: FUNCTION; Schema: public; Owner: -
+--
+
+CREATE FUNCTION public.container_tree(for_container_uuid character varying) RETURNS TABLE(pri_container_uuid character varying)
+ LANGUAGE sql
+ AS $$
+/* A lighter weight version of the update_priorities query that only returns the containers in a tree,
+ used by SELECT FOR UPDATE.
+*/
+with recursive tab(upd_container_uuid) as (
+ select for_container_uuid
+union
+ select containers.uuid
+ from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests
+ join containers on child_requests.container_uuid = containers.uuid
+ where containers.state in ('Queued', 'Locked', 'Running')
+)
+select upd_container_uuid from tab;
+$$;
+
+
+--
+-- Name: container_tree_priorities(character varying); Type: FUNCTION; Schema: public; Owner: -
+--
+
+CREATE FUNCTION public.container_tree_priorities(for_container_uuid character varying) RETURNS TABLE(pri_container_uuid character varying, upd_priority bigint)
+ LANGUAGE sql
+ AS $$
+/* Calculate the priorities of all containers starting from for_container_uuid.
+ This traverses the process tree downward and calls container_priority for each container
+ and returns a table of container uuids and their new priorities.
+*/
+with recursive tab(upd_container_uuid, upd_priority) as (
+ select for_container_uuid, container_priority(for_container_uuid, 0, '')
+union
+ select containers.uuid, container_priority(containers.uuid, child_requests.upd_priority, child_requests.upd_container_uuid)
+ from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests
+ join containers on child_requests.container_uuid = containers.uuid
+ where containers.state in ('Queued', 'Locked', 'Running')
+)
+select upd_container_uuid, upd_priority from tab;
+$$;
+
+
+--
+-- Name: jsonb_exists_all_inline_op(jsonb, text[]); Type: FUNCTION; Schema: public; Owner: -
+--
+
+CREATE FUNCTION public.jsonb_exists_all_inline_op(jsonb, text[]) RETURNS boolean
+ LANGUAGE sql IMMUTABLE
+ AS $_$SELECT $1 ?& $2$_$;
+
+
+--
+-- Name: jsonb_exists_inline_op(jsonb, text); Type: FUNCTION; Schema: public; Owner: -
+--
+
+CREATE FUNCTION public.jsonb_exists_inline_op(jsonb, text) RETURNS boolean
+ LANGUAGE sql IMMUTABLE
+ AS $_$SELECT $1 ? $2$_$;
+
+
--
-- Name: project_subtree_with_is_frozen(character varying, boolean); Type: FUNCTION; Schema: public; Owner: -
--
@@ -261,10 +347,10 @@ SET default_with_oids = false;
--
CREATE TABLE public.api_client_authorizations (
- id integer NOT NULL,
+ id bigint NOT NULL,
api_token character varying(255) NOT NULL,
- api_client_id integer NOT NULL,
- user_id integer NOT NULL,
+ api_client_id bigint NOT NULL,
+ user_id bigint NOT NULL,
created_by_ip_address character varying(255),
last_used_by_ip_address character varying(255),
last_used_at timestamp without time zone,
@@ -301,7 +387,7 @@ ALTER SEQUENCE public.api_client_authorizations_id_seq OWNED BY public.api_clien
--
CREATE TABLE public.api_clients (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
modified_by_client_uuid character varying(255),
@@ -351,7 +437,7 @@ CREATE TABLE public.ar_internal_metadata (
--
CREATE TABLE public.authorized_keys (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -391,7 +477,7 @@ ALTER SEQUENCE public.authorized_keys_id_seq OWNED BY public.authorized_keys.id;
--
CREATE TABLE public.collections (
- id integer NOT NULL,
+ id bigint NOT NULL,
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
modified_by_client_uuid character varying(255),
@@ -446,7 +532,7 @@ ALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;
--
CREATE TABLE public.container_requests (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -481,7 +567,8 @@ CREATE TABLE public.container_requests (
secret_mounts jsonb DEFAULT '{}'::jsonb,
runtime_token text,
output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
- output_properties jsonb DEFAULT '{}'::jsonb
+ output_properties jsonb DEFAULT '{}'::jsonb,
+ cumulative_cost double precision DEFAULT 0.0 NOT NULL
);
@@ -509,7 +596,7 @@ ALTER SEQUENCE public.container_requests_id_seq OWNED BY public.container_reques
--
CREATE TABLE public.containers (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -545,7 +632,9 @@ CREATE TABLE public.containers (
gateway_address character varying,
interactive_session_started boolean DEFAULT false NOT NULL,
output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
- output_properties jsonb DEFAULT '{}'::jsonb
+ output_properties jsonb DEFAULT '{}'::jsonb,
+ cost double precision DEFAULT 0.0 NOT NULL,
+ subrequests_cost double precision DEFAULT 0.0 NOT NULL
);
@@ -582,7 +671,7 @@ CREATE TABLE public.frozen_groups (
--
CREATE TABLE public.groups (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -625,7 +714,7 @@ ALTER SEQUENCE public.groups_id_seq OWNED BY public.groups.id;
--
CREATE TABLE public.humans (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -661,7 +750,7 @@ ALTER SEQUENCE public.humans_id_seq OWNED BY public.humans.id;
--
CREATE TABLE public.job_tasks (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
modified_by_client_uuid character varying(255),
@@ -725,7 +814,7 @@ ALTER SEQUENCE public.job_tasks_qsequence_seq OWNED BY public.job_tasks.qsequenc
--
CREATE TABLE public.jobs (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
modified_by_client_uuid character varying(255),
@@ -786,7 +875,7 @@ ALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id;
--
CREATE TABLE public.keep_disks (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -832,7 +921,7 @@ ALTER SEQUENCE public.keep_disks_id_seq OWNED BY public.keep_disks.id;
--
CREATE TABLE public.keep_services (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -872,7 +961,7 @@ ALTER SEQUENCE public.keep_services_id_seq OWNED BY public.keep_services.id;
--
CREATE TABLE public.links (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -912,7 +1001,7 @@ ALTER SEQUENCE public.links_id_seq OWNED BY public.links.id;
--
CREATE TABLE public.logs (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
modified_by_client_uuid character varying(255),
@@ -965,7 +1054,7 @@ CREATE TABLE public.materialized_permissions (
--
CREATE TABLE public.nodes (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -1009,7 +1098,7 @@ ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
--
CREATE TABLE public.users (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255) NOT NULL,
created_at timestamp without time zone NOT NULL,
@@ -1072,7 +1161,7 @@ UNION ALL
--
CREATE TABLE public.pipeline_instances (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -1116,7 +1205,7 @@ ALTER SEQUENCE public.pipeline_instances_id_seq OWNED BY public.pipeline_instanc
--
CREATE TABLE public.pipeline_templates (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -1154,7 +1243,7 @@ ALTER SEQUENCE public.pipeline_templates_id_seq OWNED BY public.pipeline_templat
--
CREATE TABLE public.repositories (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -1199,7 +1288,7 @@ CREATE TABLE public.schema_migrations (
--
CREATE TABLE public.specimens (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -1236,7 +1325,7 @@ ALTER SEQUENCE public.specimens_id_seq OWNED BY public.specimens.id;
--
CREATE TABLE public.traits (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -1302,7 +1391,7 @@ ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
--
CREATE TABLE public.virtual_machines (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
modified_by_client_uuid character varying(255),
@@ -1338,7 +1427,7 @@ ALTER SEQUENCE public.virtual_machines_id_seq OWNED BY public.virtual_machines.i
--
CREATE TABLE public.workflows (
- id integer NOT NULL,
+ id bigint NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
@@ -1941,6 +2030,20 @@ CREATE INDEX index_collections_on_is_trashed ON public.collections USING btree (
CREATE INDEX index_collections_on_modified_at_and_uuid ON public.collections USING btree (modified_at, uuid);
+--
+-- Name: index_collections_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_name ON public.collections USING gin (name public.gin_trgm_ops);
+
+
+--
+-- Name: index_collections_on_name_btree; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_name_btree ON public.collections USING btree (name);
+
+
--
-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2130,6 +2233,20 @@ CREATE INDEX index_groups_on_is_trashed ON public.groups USING btree (is_trashed
CREATE INDEX index_groups_on_modified_at_and_uuid ON public.groups USING btree (modified_at, uuid);
+--
+-- Name: index_groups_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_name ON public.groups USING gin (name public.gin_trgm_ops);
+
+
+--
+-- Name: index_groups_on_name_btree; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_name_btree ON public.groups USING btree (name);
+
+
--
-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
@@ -2473,6 +2590,13 @@ CREATE INDEX index_logs_on_summary ON public.logs USING btree (summary);
CREATE UNIQUE INDEX index_logs_on_uuid ON public.logs USING btree (uuid);
+--
+-- Name: index_materialized_permissions_target_is_not_user; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_materialized_permissions_target_is_not_user ON public.materialized_permissions USING btree (target_uuid, traverse_owned, ((((user_uuid)::text = (target_uuid)::text) OR ((target_uuid)::text !~~ '_____-tpzed-_______________'::text))));
+
+
--
-- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -
--
@@ -3182,6 +3306,14 @@ INSERT INTO "schema_migrations" (version) VALUES
('20220301155729'),
('20220303204419'),
('20220401153101'),
-('20220505112900');
-
-
+('20220505112900'),
+('20220726034131'),
+('20220804133317'),
+('20221219165512'),
+('20221230155924'),
+('20230421142716'),
+('20230503224107'),
+('20230815160000'),
+('20230821000000'),
+('20230922000000'),
+('20231013000000');
diff --git a/services/api/fpm-info.sh b/services/api/fpm-info.sh
index 570f4601c5..cccbc1b56b 100644
--- a/services/api/fpm-info.sh
+++ b/services/api/fpm-info.sh
@@ -5,13 +5,9 @@
fpm_depends+=('git >= 1.7.10')
case "$TARGET" in
- centos*)
+ centos*|rocky*)
fpm_depends+=(libcurl-devel postgresql-devel bison make automake gcc gcc-c++ postgresql shared-mime-info)
;;
- ubuntu1804)
- fpm_depends+=(libcurl-ssl-dev libpq-dev g++ bison zlib1g-dev make postgresql-client shared-mime-info)
- fpm_conflicts+=(ruby-bundler)
- ;;
debian* | ubuntu*)
fpm_depends+=(libcurl-ssl-dev libpq-dev g++ bison zlib1g-dev make postgresql-client shared-mime-info)
;;
diff --git a/services/api/lib/app_version.rb b/services/api/lib/app_version.rb
index 335608b2b6..95685ea5fe 100644
--- a/services/api/lib/app_version.rb
+++ b/services/api/lib/app_version.rb
@@ -2,9 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-# If you change this file, you'll probably also want to make the same
-# changes in apps/workbench/lib/app_version.rb.
-
class AppVersion
def self.git(*args, &block)
IO.popen(["git", "--git-dir", ".git"] + args, "r",
diff --git a/services/api/lib/can_be_an_owner.rb b/services/api/lib/can_be_an_owner.rb
index 6f30f5ae33..e09037819c 100644
--- a/services/api/lib/can_be_an_owner.rb
+++ b/services/api/lib/can_be_an_owner.rb
@@ -22,8 +22,8 @@ module CanBeAnOwner
klass = t.classify.constantize
next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
base.has_many(t.to_sym,
- foreign_key: :owner_uuid,
- primary_key: :uuid,
+ foreign_key: 'owner_uuid',
+ primary_key: 'uuid',
dependent: :restrict_with_exception)
end
# We need custom protection for changing an owner's primary
@@ -62,7 +62,7 @@ module CanBeAnOwner
# "name" arg is a query label that appears in logs:
"descendant_project_uuids for #{self.uuid}",
# "binds" arg is an array of [col_id, value] for '$1' vars:
- [[nil, self.uuid], [nil, 'project']],
+ [self.uuid, 'project'],
).rows.map do |project_uuid,|
project_uuid
end
@@ -75,7 +75,7 @@ module CanBeAnOwner
# Check for objects that have my old uuid listed as their owner.
self.class.reflect_on_all_associations(:has_many).each do |assoc|
- next unless assoc.foreign_key == :owner_uuid
+ next unless assoc.foreign_key == 'owner_uuid'
if assoc.klass.where(owner_uuid: uuid_was).any?
errors.add(:uuid,
"cannot be changed on a #{self.class} that owns objects")
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index f421fb5b2a..1d897b39bf 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -2,6 +2,16 @@
#
# SPDX-License-Identifier: AGPL-3.0
+# When loading YAML, deserialize :foo as ":foo", rather than raising
+# "Psych::DisallowedClass: Tried to load unspecified class: Symbol"
+class Psych::ScalarScanner
+ alias :orig_tokenize :tokenize
+ def tokenize string
+ return string if string =~ /^:[a-zA-Z]/
+ orig_tokenize(string)
+ end
+end
+
module Psych
module Visitors
class YAMLTree < Psych::Visitors::Visitor
@@ -226,7 +236,7 @@ class ConfigLoader
if erb
yaml = ERB.new(yaml).result(binding)
end
- YAML.load(yaml, deserialize_symbols: false)
+ YAML.safe_load(yaml)
else
{}
end
diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb
index ee666b77ab..7c99c911f8 100644
--- a/services/api/lib/current_api_client.rb
+++ b/services/api/lib/current_api_client.rb
@@ -2,16 +2,6 @@
#
# SPDX-License-Identifier: AGPL-3.0
-$system_user = nil
-$system_group = nil
-$all_users_group = nil
-$anonymous_user = nil
-$anonymous_group = nil
-$anonymous_group_read_permission = nil
-$empty_collection = nil
-$public_project_group = nil
-$public_project_group_read_permission = nil
-
module CurrentApiClient
def current_user
Thread.current[:user]
@@ -74,26 +64,26 @@ module CurrentApiClient
end
def system_user
- $system_user = check_cache $system_user do
- real_current_user = Thread.current[:user]
- begin
- Thread.current[:user] = User.new(is_admin: true,
- is_active: true,
- uuid: system_user_uuid)
+ real_current_user = Thread.current[:user]
+ begin
+ Thread.current[:user] = User.new(is_admin: true,
+ is_active: true,
+ uuid: system_user_uuid)
+ $system_user = check_cache($system_user) do
User.where(uuid: system_user_uuid).
first_or_create!(is_active: true,
is_admin: true,
email: 'root',
first_name: 'root',
last_name: '')
- ensure
- Thread.current[:user] = real_current_user
end
+ ensure
+ Thread.current[:user] = real_current_user
end
end
def system_group
- $system_group = check_cache $system_group do
+ $system_group = check_cache($system_group) do
act_as_system_user do
ActiveRecord::Base.transaction do
Group.where(uuid: system_group_uuid).
@@ -120,7 +110,7 @@ module CurrentApiClient
end
def all_users_group
- $all_users_group = check_cache $all_users_group do
+ $all_users_group = check_cache($all_users_group) do
act_as_system_user do
ActiveRecord::Base.transaction do
Group.where(uuid: all_users_group_uuid).
@@ -156,7 +146,7 @@ module CurrentApiClient
end
def anonymous_group
- $anonymous_group = check_cache $anonymous_group do
+ $anonymous_group = check_cache($anonymous_group) do
act_as_system_user do
ActiveRecord::Base.transaction do
Group.where(uuid: anonymous_group_uuid).
@@ -169,8 +159,7 @@ module CurrentApiClient
end
def anonymous_group_read_permission
- $anonymous_group_read_permission =
- check_cache $anonymous_group_read_permission do
+ $anonymous_group_read_permission = check_cache($anonymous_group_read_permission) do
act_as_system_user do
Link.where(tail_uuid: all_users_group.uuid,
head_uuid: anonymous_group.uuid,
@@ -181,7 +170,7 @@ module CurrentApiClient
end
def anonymous_user
- $anonymous_user = check_cache $anonymous_user do
+ $anonymous_user = check_cache($anonymous_user) do
act_as_system_user do
User.where(uuid: anonymous_user_uuid).
first_or_create!(is_active: false,
@@ -201,7 +190,7 @@ module CurrentApiClient
end
def public_project_group
- $public_project_group = check_cache $public_project_group do
+ $public_project_group = check_cache($public_project_group) do
act_as_system_user do
ActiveRecord::Base.transaction do
Group.where(uuid: public_project_uuid).
@@ -214,8 +203,7 @@ module CurrentApiClient
end
def public_project_read_permission
- $public_project_group_read_permission =
- check_cache $public_project_group_read_permission do
+ $public_project_group_read_permission = check_cache($public_project_group_read_permission) do
act_as_system_user do
Link.where(tail_uuid: anonymous_group.uuid,
head_uuid: public_project_group.uuid,
@@ -226,7 +214,7 @@ module CurrentApiClient
end
def anonymous_user_token_api_client
- $anonymous_user_token_api_client = check_cache $anonymous_user_token_api_client do
+ $anonymous_user_token_api_client = check_cache($anonymous_user_token_api_client) do
act_as_system_user do
ActiveRecord::Base.transaction do
ApiClient.find_or_create_by!(is_trusted: false, url_prefix: "", name: "AnonymousUserToken")
@@ -236,7 +224,7 @@ module CurrentApiClient
end
def system_root_token_api_client
- $system_root_token_api_client = check_cache $system_root_token_api_client do
+ $system_root_token_api_client = check_cache($system_root_token_api_client) do
act_as_system_user do
ActiveRecord::Base.transaction do
ApiClient.find_or_create_by!(is_trusted: true, url_prefix: "", name: "SystemRootToken")
@@ -250,7 +238,7 @@ module CurrentApiClient
end
def empty_collection
- $empty_collection = check_cache $empty_collection do
+ $empty_collection = check_cache($empty_collection) do
act_as_system_user do
ActiveRecord::Base.transaction do
Collection.
@@ -269,31 +257,41 @@ module CurrentApiClient
end
end
- private
-
- # If the given value is nil, or the cache has been cleared since it
- # was set, yield. Otherwise, return the given value.
- def check_cache value
- if not Rails.env.test? and
- ActionController::Base.cache_store.is_a? ActiveSupport::Cache::FileStore and
- not File.owned? ActionController::Base.cache_store.cache_path
- # If we don't own the cache dir, we're probably
- # crunch-dispatch. Whoever we are, using this cache is likely to
- # either fail or screw up the cache for someone else. So we'll
- # just assume the $globals are OK to live forever.
- #
- # The reason for making the globals expire with the cache in the
- # first place is to avoid leaking state between test cases: in
- # production, we don't expect the database seeds to ever go away
- # even when the cache is cleared, so there's no particular
- # reason to expire our global variables.
+ # Purge the module globals if necessary. If the cached value is
+ # non-nil and the globals weren't purged, return the cached
+ # value. Otherwise, call the block.
+ #
+ # Purge is only done in test mode.
+ def check_cache(cached)
+ if Rails.env != 'test'
+ return (cached || yield)
+ end
+ t = Rails.cache.fetch "CurrentApiClient.$system_globals_reset" do
+ Time.now.to_f
+ end
+ if t != $system_globals_reset
+ reset_system_globals(t)
+ yield
else
- Rails.cache.fetch "CurrentApiClient.$globals" do
- value = nil
- true
- end
+ cached || yield
end
- return value unless value.nil?
- yield
end
+
+ def reset_system_globals(t)
+ $system_globals_reset = t
+ $system_user = nil
+ $system_group = nil
+ $all_users_group = nil
+ $anonymous_group = nil
+ $anonymous_group_read_permission = nil
+ $anonymous_user = nil
+ $public_project_group = nil
+ $public_project_group_read_permission = nil
+ $anonymous_user_token_api_client = nil
+ $system_root_token_api_client = nil
+ $empty_collection = nil
+ end
+ module_function :reset_system_globals
end
+
+CurrentApiClient.reset_system_globals(0)
diff --git a/services/api/lib/db_current_time.rb b/services/api/lib/db_current_time.rb
index 5e1634ecb9..2d58e3c389 100644
--- a/services/api/lib/db_current_time.rb
+++ b/services/api/lib/db_current_time.rb
@@ -6,10 +6,10 @@ module DbCurrentTime
CURRENT_TIME_SQL = "SELECT clock_timestamp() AT TIME ZONE 'UTC'"
def db_current_time
- Time.parse(ActiveRecord::Base.connection.select_value(CURRENT_TIME_SQL) + " +0000")
+ ActiveRecord::Base.connection.select_value(CURRENT_TIME_SQL)
end
def db_transaction_time
- Time.parse(ActiveRecord::Base.connection.select_value("SELECT current_timestamp AT TIME ZONE 'UTC'") + " +0000")
+ ActiveRecord::Base.connection.select_value("SELECT current_timestamp AT TIME ZONE 'UTC'")
end
end
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
index cef76f08a5..6718d384ee 100644
--- a/services/api/lib/enable_jobs_api.rb
+++ b/services/api/lib/enable_jobs_api.rb
@@ -47,7 +47,7 @@ def check_enable_legacy_jobs_api
if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
(Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
- Job.count == 0)
+ ActiveRecord::Base.connection.select_value("SELECT COUNT(*) FROM jobs LIMIT 1") == 0)
Rails.configuration.API.DisabledAPIs.merge! Disable_jobs_api_method_list
end
end
diff --git a/services/api/lib/has_uuid.rb b/services/api/lib/has_uuid.rb
index 2074566941..217113beec 100644
--- a/services/api/lib/has_uuid.rb
+++ b/services/api/lib/has_uuid.rb
@@ -14,14 +14,14 @@ module HasUuid
base.has_many(:links_via_head,
-> { where("not (link_class = 'permission')") },
class_name: 'Link',
- foreign_key: :head_uuid,
- primary_key: :uuid,
+ foreign_key: 'head_uuid',
+ primary_key: 'uuid',
dependent: :destroy)
base.has_many(:links_via_tail,
-> { where("not (link_class = 'permission')") },
class_name: 'Link',
- foreign_key: :tail_uuid,
- primary_key: :uuid,
+ foreign_key: 'tail_uuid',
+ primary_key: 'uuid',
dependent: :destroy)
end
diff --git a/services/api/lib/migrate_yaml_to_json.rb b/services/api/lib/migrate_yaml_to_json.rb
index 1db7ed0113..8987f3364c 100644
--- a/services/api/lib/migrate_yaml_to_json.rb
+++ b/services/api/lib/migrate_yaml_to_json.rb
@@ -8,7 +8,7 @@ module MigrateYAMLToJSON
n = conn.update(
"UPDATE #{table} SET #{column}=$1 WHERE #{column}=$2",
"#{table}.#{column} convert YAML to JSON",
- [[nil, "{}"], [nil, "--- {}\n"]])
+ ["{}", "--- {}\n"])
Rails.logger.info("#{table}.#{column}: #{n} rows updated using empty hash")
finished = false
while !finished
@@ -16,14 +16,14 @@ module MigrateYAMLToJSON
conn.exec_query(
"SELECT id, #{column} FROM #{table} WHERE #{column} LIKE $1 LIMIT 100",
"#{table}.#{column} check for YAML",
- [[nil, '---%']],
+ ['---%'],
).rows.map do |id, yaml|
n += 1
- json = SafeJSON.dump(YAML.load(yaml))
+ json = SafeJSON.dump(YAML.safe_load(yaml))
conn.exec_query(
"UPDATE #{table} SET #{column}=$1 WHERE id=$2 AND #{column}=$3",
"#{table}.#{column} convert YAML to JSON",
- [[nil, json], [nil, id], [nil, yaml]])
+ [json, id, yaml])
end
Rails.logger.info("#{table}.#{column}: #{n} rows updated")
finished = (n == 0)
diff --git a/services/api/lib/record_filters.rb b/services/api/lib/record_filters.rb
index 65c25810ac..e51223254f 100644
--- a/services/api/lib/record_filters.rb
+++ b/services/api/lib/record_filters.rb
@@ -121,9 +121,9 @@ module RecordFilters
end
when 'exists'
if operand == true
- cond_out << "jsonb_exists(#{attr_table_name}.#{attr}, ?)"
+ cond_out << "jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)"
elsif operand == false
- cond_out << "(NOT jsonb_exists(#{attr_table_name}.#{attr}, ?)) OR #{attr_table_name}.#{attr} is NULL"
+ cond_out << "(NOT jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)) OR #{attr_table_name}.#{attr} is NULL"
else
raise ArgumentError.new("Invalid operand '#{operand}' for '#{operator}' must be true or false")
end
@@ -140,7 +140,7 @@ module RecordFilters
raise ArgumentError.new("Invalid attribute '#{attr}' for operator '#{operator}' in filter")
end
- cond_out << "jsonb_exists(#{attr_table_name}.#{attr}, ?)"
+ cond_out << "jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)"
param_out << operand
elsif expr = /^ *\( *(\w+) *(<=?|>=?|=) *(\w+) *\) *$/.match(attr)
if operator != '=' || ![true,"true"].index(operand)
@@ -164,10 +164,10 @@ module RecordFilters
!(col.andand.type == :jsonb && ['contains', '=', '<>', '!='].index(operator))
raise ArgumentError.new("Invalid attribute '#{attr}' in filter")
end
+ attr_type = attr_model_class.attribute_column(attr).type
case operator
when '=', '<', '<=', '>', '>=', '!=', 'like', 'ilike'
- attr_type = attr_model_class.attribute_column(attr).type
operator = '<>' if operator == '!='
if operand.is_a? String
if attr_type == :boolean
@@ -181,8 +181,8 @@ module RecordFilters
when '0', 'f', 'false', 'n', 'no'
operand = false
else
- raise ArgumentError("Invalid operand '#{operand}' for " \
- "boolean attribute '#{attr}'")
+ raise ArgumentError.new("Invalid operand '#{operand}' for " \
+ "boolean attribute '#{attr}'")
end
end
if operator == '<>'
@@ -206,6 +206,10 @@ module RecordFilters
cond_out << "#{attr_table_name}.#{attr} #{operator} ?"
param_out << operand
elsif (attr_type == :integer)
+ if !operand.is_a?(Integer) || operand.bit_length > 64
+ raise ArgumentError.new("Invalid operand '#{operand}' "\
+ "for integer attribute '#{attr}'")
+ end
cond_out << "#{attr_table_name}.#{attr} #{operator} ?"
param_out << operand
else
@@ -213,17 +217,24 @@ module RecordFilters
"for '#{operator}' operator in filters")
end
when 'in', 'not in'
- if operand.is_a? Array
- cond_out << "#{attr_table_name}.#{attr} #{operator} (?)"
- param_out << operand
- if operator == 'not in' and not operand.include?(nil)
- # explicitly allow NULL
- cond_out[-1] = "(#{cond_out[-1]} OR #{attr_table_name}.#{attr} IS NULL)"
- end
- else
+ if !operand.is_a? Array
raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
"for '#{operator}' operator in filters")
end
+ if attr_type == :integer
+ operand.each do |el|
+ if !el.is_a?(Integer) || el.bit_length > 64
+ raise ArgumentError.new("Invalid element '#{el}' in array "\
+ "for integer attribute '#{attr}'")
+ end
+ end
+ end
+ cond_out << "#{attr_table_name}.#{attr} #{operator} (?)"
+ param_out << operand
+ if operator == 'not in' and not operand.include?(nil)
+ # explicitly allow NULL
+ cond_out[-1] = "(#{cond_out[-1]} OR #{attr_table_name}.#{attr} IS NULL)"
+ end
when 'is_a'
operand = [operand] unless operand.is_a? Array
cond = []
@@ -259,13 +270,18 @@ module RecordFilters
raise ArgumentError.new("Invalid element #{operand.inspect} in operand for #{operator.inspect} operator (operand must be a string or array of strings)")
end
end
- # We use jsonb_exists_all(a,b) instead of "a ?& b" because
- # the pg gem thinks "?" is a bind var. And we use string
- # interpolation instead of param_out because the pg gem
- # flattens param_out / doesn't support passing arrays as
- # bind vars.
+ # We use jsonb_exists_all_inline_op(a,b) instead of "a ?&
+ # b" because the pg gem thinks "?" is a bind var.
+ #
+ # See note in migration
+ # 20230815160000_jsonb_exists_functions about _inline_op
+ # functions.
+ #
+ # We use string interpolation instead of param_out
+ # because the pg gem flattens param_out / doesn't support
+ # passing arrays as bind vars.
q = operand.map { |s| ActiveRecord::Base.connection.quote(s) }.join(',')
- cond_out << "jsonb_exists_all(#{attr_table_name}.#{attr}, array[#{q}])"
+ cond_out << "jsonb_exists_all_inline_op(#{attr_table_name}.#{attr}, array[#{q}])"
else
raise ArgumentError.new("Invalid operator '#{operator}'")
end
diff --git a/services/api/lib/serializers.rb b/services/api/lib/serializers.rb
index 37734e0bb4..c25b9060b4 100644
--- a/services/api/lib/serializers.rb
+++ b/services/api/lib/serializers.rb
@@ -16,7 +16,7 @@ class Serializer
end
def self.legacy_load(s)
- val = Psych.safe_load(s)
+ val = Psych.safe_load(s, permitted_classes: [Time])
if val.is_a? String
# If apiserver was downgraded to a YAML-only version after
# storing JSON in the database, the old code would have loaded
diff --git a/services/api/lib/simulate_job_log.rb b/services/api/lib/simulate_job_log.rb
deleted file mode 100644
index abcf42eaa7..0000000000
--- a/services/api/lib/simulate_job_log.rb
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'current_api_client'
-
-module SimulateJobLog
- include CurrentApiClient
- def replay(filename, multiplier = 1, simulated_job_uuid = nil)
- raise "Environment must be development or test" unless [ 'test', 'development' ].include? ENV['RAILS_ENV']
-
- multiplier = multiplier.to_f
- multiplier = 1.0 if multiplier <= 0
-
- actual_start_time = Time.now
- log_start_time = nil
-
- if simulated_job_uuid and (job = Job.where(uuid: simulated_job_uuid).first)
- job_owner_uuid = job.owner_uuid
- else
- job_owner_uuid = system_user_uuid
- end
-
- act_as_system_user do
- File.open(filename).each.with_index do |line, index|
- cols = {}
- cols[:timestamp], rest_of_line = line.split(' ', 2)
- begin
- cols[:timestamp] = Time.strptime( cols[:timestamp], "%Y-%m-%d_%H:%M:%S" )
- rescue ArgumentError
- if line =~ /^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (.*)/
- # Wed Nov 19 07:12:39 2014
- cols[:timestamp] = Time.strptime( $1, "%a %b %d %H:%M:%S %Y" )
- rest_of_line = $2
- else
- STDERR.puts "Ignoring log line because of unknown time format: #{line}"
- end
- end
- cols[:job_uuid], cols[:pid], cols[:task], cols[:event_type], cols[:message] = rest_of_line.split(' ', 5)
- # Override job uuid with a simulated one if specified
- cols[:job_uuid] = simulated_job_uuid || cols[:job_uuid]
- # determine when we want to simulate this log being created, based on the time multiplier
- log_start_time = cols[:timestamp] if log_start_time.nil?
- log_time = cols[:timestamp]
- actual_elapsed_time = Time.now - actual_start_time
- log_elapsed_time = log_time - log_start_time
- modified_elapsed_time = log_elapsed_time / multiplier
- pause_time = modified_elapsed_time - actual_elapsed_time
- sleep pause_time if pause_time > 0
-
- Log.new({
- owner_uuid: job_owner_uuid,
- event_at: Time.zone.local_to_utc(cols[:timestamp]),
- object_uuid: cols[:job_uuid],
- event_type: cols[:event_type],
- properties: { 'text' => line }
- }).save!
- end
- end
-
- end
-end
diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake
index 7a0ab3826a..db1b3667cc 100644
--- a/services/api/lib/tasks/delete_old_container_logs.rake
+++ b/services/api/lib/tasks/delete_old_container_logs.rake
@@ -8,11 +8,9 @@
# from the logs table.
namespace :db do
- desc "Remove old container log entries from the logs table"
+ desc "deprecated / no-op"
task delete_old_container_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND now() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')"
-
- ActiveRecord::Base.connection.execute(delete_sql)
+ Rails.logger.info "this db:delete_old_container_logs rake task is no longer used"
end
end
diff --git a/services/api/lib/tasks/manage_long_lived_tokens.rake b/services/api/lib/tasks/manage_long_lived_tokens.rake
index 7a665ff7e7..70a0f24284 100644
--- a/services/api/lib/tasks/manage_long_lived_tokens.rake
+++ b/services/api/lib/tasks/manage_long_lived_tokens.rake
@@ -31,7 +31,7 @@ namespace :db do
end
if (auth.user.uuid =~ /-tpzed-000000000000000/).nil? and (auth.user.uuid =~ /-tpzed-anonymouspublic/).nil?
CurrentApiClientHelper.act_as_system_user do
- auth.update_attributes!(expires_at: exp_date)
+ auth.update!(expires_at: exp_date)
end
token_count += 1
end
diff --git a/services/api/lib/tasks/replay_job_log.rake b/services/api/lib/tasks/replay_job_log.rake
deleted file mode 100644
index 9c0f005275..0000000000
--- a/services/api/lib/tasks/replay_job_log.rake
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'simulate_job_log'
-desc 'Simulate job logging from a file. Three arguments: log filename, time multipler (optional), simulated job uuid (optional). E.g. (use quotation marks if using spaces between args): rake "replay_job_log[log.txt, 2.0, qr1hi-8i9sb-nf3qk0xzwwz3lre]"'
-task :replay_job_log, [:filename, :multiplier, :uuid] => :environment do |t, args|
- include SimulateJobLog
- abort("No filename specified.") if args[:filename].blank?
- replay( args[:filename], args[:multiplier].to_f, args[:uuid] )
-end
diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb
index c99b08513b..50611c305d 100644
--- a/services/api/lib/trashable.rb
+++ b/services/api/lib/trashable.rb
@@ -93,19 +93,19 @@ end
module TrashableController
def destroy
if !@object.is_trashed
- @object.update_attributes!(trash_at: db_current_time)
+ @object.update!(trash_at: db_current_time)
end
earliest_delete = (@object.trash_at +
Rails.configuration.Collections.BlobSigningTTL)
if @object.delete_at > earliest_delete
- @object.update_attributes!(delete_at: earliest_delete)
+ @object.update!(delete_at: earliest_delete)
end
show
end
def trash
if !@object.is_trashed
- @object.update_attributes!(trash_at: db_current_time)
+ @object.update!(trash_at: db_current_time)
end
show
end
diff --git a/services/api/lib/update_permissions.rb b/services/api/lib/update_permissions.rb
index b7e5476404..52e3e0c081 100644
--- a/services/api/lib/update_permissions.rb
+++ b/services/api/lib/update_permissions.rb
@@ -2,12 +2,12 @@
#
# SPDX-License-Identifier: AGPL-3.0
-require '20200501150153_permission_table_constants'
+require_relative '20200501150153_permission_table_constants'
REVOKE_PERM = 0
CAN_MANAGE_PERM = 3
-def update_permissions perm_origin_uuid, starting_uuid, perm_level, edge_id=nil
+def update_permissions perm_origin_uuid, starting_uuid, perm_level, edge_id=nil, update_all_users=false
return if Thread.current[:suppress_update_permissions]
#
@@ -100,44 +100,105 @@ def update_permissions perm_origin_uuid, starting_uuid, perm_level, edge_id=nil
# tested this on Postgres 9.6, so in the future we should reevaluate
# the performance & query plan on Postgres 12.
#
+ # Update: as of 2023-10-13, incorrect merge join behavior is still
+ # observed on at least one major user installation that is using
+ # Postgres 14, so it seems this workaround is still needed.
+ #
# https://git.furworks.de/opensourcemirror/postgresql/commit/a314c34079cf06d05265623dd7c056f8fa9d577f
#
# Disable merge join for just this query (also local for this transaction), then reenable it.
ActiveRecord::Base.connection.exec_query "SET LOCAL enable_mergejoin to false;"
- temptable_perms = "temp_perms_#{rand(2**64).to_s(10)}"
- ActiveRecord::Base.connection.exec_query %{
-create temporary table #{temptable_perms} on commit drop
-as select * from compute_permission_subgraph($1, $2, $3, $4)
-},
- 'update_permissions.select',
- [[nil, perm_origin_uuid],
- [nil, starting_uuid],
- [nil, perm_level],
- [nil, edge_id]]
-
- ActiveRecord::Base.connection.exec_query "SET LOCAL enable_mergejoin to true;"
-
- # Now that we have recomputed a set of permissions, delete any
- # rows from the materialized_permissions table where (target_uuid,
- # user_uuid) is not present or has perm_level=0 in the recomputed
- # set.
- ActiveRecord::Base.connection.exec_delete %{
-delete from #{PERMISSION_VIEW} where
- target_uuid in (select target_uuid from #{temptable_perms}) and
- not exists (select 1 from #{temptable_perms}
- where target_uuid=#{PERMISSION_VIEW}.target_uuid and
- user_uuid=#{PERMISSION_VIEW}.user_uuid and
- val>0)
+ if perm_origin_uuid[5..11] == '-tpzed-' && !update_all_users
+ # Modifying permission granted to a user, recompute the all permissions for that user
+
+ ActiveRecord::Base.connection.exec_query %{
+with origin_user_perms as (
+ select pq.origin_uuid as user_uuid, target_uuid, pq.val, pq.traverse_owned from (
+ #{PERM_QUERY_TEMPLATE % {:base_case => %{
+ select '#{perm_origin_uuid}'::varchar(255), '#{perm_origin_uuid}'::varchar(255), 3, true, true
+ where exists (select uuid from users where uuid='#{perm_origin_uuid}')
},
- "update_permissions.delete"
+:edge_perm => %{
+case (edges.edge_id = '#{edge_id}')
+ when true then #{perm_level}
+ else edges.val
+ end
+}
+} }) as pq),
+
+/*
+ Because users always have permission on themselves, this
+ query also makes sure those permission rows are always
+ returned.
+*/
+temptable_perms as (
+ select * from origin_user_perms
+ union all
+ select target_uuid as user_uuid, target_uuid, 3, true
+ from origin_user_perms
+ where origin_user_perms.target_uuid like '_____-tpzed-_______________' and
+ origin_user_perms.target_uuid != '#{perm_origin_uuid}'
+),
+
+/*
+ Now that we have recomputed a set of permissions, delete any
+ rows from the materialized_permissions table where (target_uuid,
+ user_uuid) is not present or has perm_level=0 in the recomputed
+ set.
+*/
+delete_rows as (
+ delete from #{PERMISSION_VIEW} where
+ user_uuid='#{perm_origin_uuid}' and
+ not exists (select 1 from temptable_perms
+ where target_uuid=#{PERMISSION_VIEW}.target_uuid and
+ user_uuid='#{perm_origin_uuid}' and
+ val>0)
+)
+
+/*
+ Now insert-or-update permissions in the recomputed set. The
+ WHERE clause is important to avoid redundantly updating rows
+ that haven't actually changed.
+*/
+insert into #{PERMISSION_VIEW} (user_uuid, target_uuid, perm_level, traverse_owned)
+ select user_uuid, target_uuid, val as perm_level, traverse_owned from temptable_perms where val>0
+on conflict (user_uuid, target_uuid) do update
+set perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned
+where #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and
+ #{PERMISSION_VIEW}.target_uuid=EXCLUDED.target_uuid and
+ (#{PERMISSION_VIEW}.perm_level != EXCLUDED.perm_level or
+ #{PERMISSION_VIEW}.traverse_owned != EXCLUDED.traverse_owned);
- # Now insert-or-update permissions in the recomputed set. The
- # WHERE clause is important to avoid redundantly updating rows
- # that haven't actually changed.
+}
+ else
+ # Modifying permission granted to a group, recompute permissions for everything accessible through that group
ActiveRecord::Base.connection.exec_query %{
+with temptable_perms as (
+ select * from compute_permission_subgraph($1, $2, $3, $4)),
+
+/*
+ Now that we have recomputed a set of permissions, delete any
+ rows from the materialized_permissions table where (target_uuid,
+ user_uuid) is not present or has perm_level=0 in the recomputed
+ set.
+*/
+delete_rows as (
+ delete from #{PERMISSION_VIEW} where
+ target_uuid in (select target_uuid from temptable_perms) and
+ not exists (select 1 from temptable_perms
+ where target_uuid=#{PERMISSION_VIEW}.target_uuid and
+ user_uuid=#{PERMISSION_VIEW}.user_uuid and
+ val>0)
+)
+
+/*
+ Now insert-or-update permissions in the recomputed set. The
+ WHERE clause is important to avoid redundantly updating rows
+ that haven't actually changed.
+*/
insert into #{PERMISSION_VIEW} (user_uuid, target_uuid, perm_level, traverse_owned)
- select user_uuid, target_uuid, val as perm_level, traverse_owned from #{temptable_perms} where val>0
+ select user_uuid, target_uuid, val as perm_level, traverse_owned from temptable_perms where val>0
on conflict (user_uuid, target_uuid) do update
set perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned
where #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and
@@ -145,7 +206,12 @@ where #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and
(#{PERMISSION_VIEW}.perm_level != EXCLUDED.perm_level or
#{PERMISSION_VIEW}.traverse_owned != EXCLUDED.traverse_owned);
},
- "update_permissions.insert"
+ 'update_permissions.select',
+ [perm_origin_uuid,
+ starting_uuid,
+ perm_level,
+ edge_id]
+ end
if perm_level>0
check_permissions_against_full_refresh
diff --git a/services/api/lib/update_priorities.rb b/services/api/lib/update_priorities.rb
new file mode 100644
index 0000000000..94115340df
--- /dev/null
+++ b/services/api/lib/update_priorities.rb
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+def row_lock_for_priority_update container_uuid
+ # Locks all the containers under this container, and also any
+ # immediate parent containers. This ensures we have locked
+ # everything that gets touched by either a priority update or state
+ # update.
+ ActiveRecord::Base.connection.exec_query %{
+ select 1 from containers where containers.uuid in (
+ select pri_container_uuid from container_tree($1)
+UNION
+ select container_requests.requesting_container_uuid from container_requests
+ where container_requests.container_uuid = $1
+ and container_requests.state = 'Committed'
+ and container_requests.requesting_container_uuid is not NULL
+)
+ order by containers.uuid for update
+ }, 'select_for_update_priorities', [container_uuid]
+end
+
+def update_priorities starting_container_uuid
+ # Ensure the row locks were taken in order
+ row_lock_for_priority_update starting_container_uuid
+
+ ActiveRecord::Base.connection.exec_query %{
+update containers set priority=computed.upd_priority from container_tree_priorities($1) as computed
+ where containers.uuid = computed.pri_container_uuid and priority != computed.upd_priority
+}, 'update_priorities', [starting_container_uuid]
+end
diff --git a/services/api/lib/update_priority.rb b/services/api/lib/update_priority.rb
deleted file mode 100644
index 6c17f1bd03..0000000000
--- a/services/api/lib/update_priority.rb
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-module UpdatePriority
- extend CurrentApiClient
-
- # Clean up after races.
- #
- # If container priority>0 but there are no committed container
- # requests for it, reset priority to 0.
- #
- # If container priority=0 but there are committed container requests
- # for it with priority>0, update priority.
- #
- # Normally, update_priority is a no-op if another thread/process is
- # already updating. Test cases that need to check priorities after
- # updating can force a (possibly overlapping) update in the current
- # thread/transaction by setting the "nolock" flag. See #14878.
- def self.update_priority(nolock: false)
- if !File.owned?(Rails.root.join('tmp'))
- Rails.logger.warn("UpdatePriority: not owner of #{Rails.root}/tmp, skipping")
- return
- end
- lockfile = Rails.root.join('tmp', 'update_priority.lock')
- File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
- return unless nolock || f.flock(File::LOCK_NB|File::LOCK_EX)
-
- # priority>0 but should be 0:
- ActiveRecord::Base.connection.
- exec_query("UPDATE containers AS c SET priority=0 WHERE state IN ('Queued', 'Locked', 'Running') AND priority>0 AND uuid NOT IN (SELECT container_uuid FROM container_requests WHERE priority>0 AND state='Committed');", 'UpdatePriority')
-
- # priority==0 but should be >0:
- act_as_system_user do
- Container.
- joins("JOIN container_requests ON container_requests.container_uuid=containers.uuid AND container_requests.state=#{ActiveRecord::Base.connection.quote(ContainerRequest::Committed)} AND container_requests.priority>0").
- where('containers.state IN (?) AND containers.priority=0 AND container_requests.uuid IS NOT NULL',
- [Container::Queued, Container::Locked, Container::Running]).
- map(&:update_priority!)
- end
- end
- end
-
- def self.run_update_thread
- need = false
- Rails.cache.fetch('UpdatePriority', expires_in: 5.seconds) do
- need = true
- end
- return if !need
-
- Thread.new do
- Thread.current.abort_on_exception = false
- begin
- update_priority
- rescue => e
- Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
- ensure
- # Rails 5.1+ makes test threads share a database connection, so we can't
- # close a connection shared with other threads.
- # https://github.com/rails/rails/commit/deba47799ff905f778e0c98a015789a1327d5087
- if Rails.env != "test"
- ActiveRecord::Base.connection.close
- end
- end
- end
- end
-end
diff --git a/services/api/script/arvados-git-sync.rb b/services/api/script/arvados-git-sync.rb
index ad6aaf9eb5..9f8f050c10 100755
--- a/services/api/script/arvados-git-sync.rb
+++ b/services/api/script/arvados-git-sync.rb
@@ -26,7 +26,9 @@ DEBUG = 1
# if present, overriding base config parameters as specified
path = File.absolute_path('../../config/arvados-clients.yml', __FILE__)
if File.exist?(path) then
- cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
+ cp_config = File.open(path) do |f|
+ YAML.safe_load(f, filename: path)[ENV['RAILS_ENV']]
+ end
else
puts "Please create a\n #{path}\n file"
exit 1
diff --git a/services/api/script/migrate-gitolite-to-uuid-storage.rb b/services/api/script/migrate-gitolite-to-uuid-storage.rb
index 91acf3e256..98f25ca537 100755
--- a/services/api/script/migrate-gitolite-to-uuid-storage.rb
+++ b/services/api/script/migrate-gitolite-to-uuid-storage.rb
@@ -40,7 +40,9 @@ DEBUG = 1
# if present, overriding base config parameters as specified
path = File.dirname(__FILE__) + '/config/arvados-clients.yml'
if File.exist?(path) then
- cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
+ cp_config = File.open(path) do |f|
+ YAML.safe_load(f, filename: path)[ENV['RAILS_ENV']]
+ end
else
puts "Please create a\n " + File.dirname(__FILE__) + "/config/arvados-clients.yml\n file"
exit 1
diff --git a/services/api/test/fixtures/authorized_keys.yml b/services/api/test/fixtures/authorized_keys.yml
index 1c14204d98..b2b2c8be1b 100644
--- a/services/api/test/fixtures/authorized_keys.yml
+++ b/services/api/test/fixtures/authorized_keys.yml
@@ -5,6 +5,7 @@
active:
uuid: zzzzz-fngyi-12nc9ov4osp8nae
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
authorized_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
key_type: SSH
name: active
@@ -13,6 +14,7 @@ active:
admin:
uuid: zzzzz-fngyi-g290j3i3u701duh
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
authorized_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
key_type: SSH
name: admin
@@ -21,6 +23,7 @@ admin:
spectator:
uuid: zzzzz-fngyi-3uze1ipbnz2c2c2
owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+ modified_by_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
authorized_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
key_type: SSH
name: spectator
@@ -29,6 +32,7 @@ spectator:
project_viewer:
uuid: zzzzz-fngyi-5d3av1396niwcej
owner_uuid: zzzzz-tpzed-projectviewer1a
+ modified_by_user_uuid: zzzzz-tpzed-projectviewer1a
authorized_user_uuid: zzzzz-tpzed-projectviewer1a
key_type: SSH
name: project_viewer
diff --git a/services/api/test/fixtures/collections.yml b/services/api/test/fixtures/collections.yml
index 1f2eab73af..72aad1d68e 100644
--- a/services/api/test/fixtures/collections.yml
+++ b/services/api/test/fixtures/collections.yml
@@ -220,6 +220,51 @@ foo_collection_in_aproject:
manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
name: "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+fuse_filters_test_foo:
+ uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+ current_version_uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+ portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:01:00Z
+ modified_at: 2024-02-09T12:01:01Z
+ updated_at: 2024-02-09T12:01:01Z
+ manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+ name: foo
+ properties:
+ MainFile: foo
+
+fuse_filters_test_bar:
+ uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+ current_version_uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+ portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:02:00Z
+ modified_at: 2024-02-09T12:02:01Z
+ updated_at: 2024-02-09T12:02:01Z
+ manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+ name: bar
+ properties:
+ MainFile: bar
+
+fuse_filters_test_baz:
+ uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+ current_version_uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+ portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:03:00Z
+ modified_at: 2024-02-09T12:03:01Z
+ updated_at: 2024-02-09T12:03:01Z
+ manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+ name: baz
+ properties:
+ MainFile: baz
+
user_agreement_in_anonymously_accessible_project:
uuid: zzzzz-4zz18-uukreo9rbgwsujr
current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujr
@@ -987,8 +1032,8 @@ collection_with_list_prop_odd:
listprop: [elem1, elem3, 5]
collection_with_list_prop_even:
- uuid: zzzzz-4zz18-listpropertyeven
- current_version_uuid: zzzzz-4zz18-listpropertyeven
+ uuid: zzzzz-4zz18-listpropertyevn
+ current_version_uuid: zzzzz-4zz18-listpropertyevn
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
@@ -1002,8 +1047,8 @@ collection_with_list_prop_even:
listprop: [elem2, 4, elem6, ELEM8]
collection_with_listprop_elem1:
- uuid: zzzzz-4zz18-listpropelem1
- current_version_uuid: zzzzz-4zz18-listpropelem1
+ uuid: zzzzz-4zz18-listpropelemen1
+ current_version_uuid: zzzzz-4zz18-listpropelemen1
portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
created_at: 2015-02-13T17:22:54Z
@@ -1128,8 +1173,8 @@ collection_<%=i%>_of_10:
uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>
current_version_uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>
owner_uuid: zzzzz-j7d0g-0010collections
- created_at: <%= i.minute.ago.to_s(:db) %>
- modified_at: <%= i.minute.ago.to_s(:db) %>
+ created_at: <%= i.minute.ago.to_fs(:db) %>
+ modified_at: <%= i.minute.ago.to_fs(:db) %>
<% end %>
# collections in project_with_201_collections
@@ -1141,8 +1186,8 @@ collection_<%=i%>_of_201:
uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>
current_version_uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>
owner_uuid: zzzzz-j7d0g-0201collections
- created_at: <%= i.minute.ago.to_s(:db) %>
- modified_at: <%= i.minute.ago.to_s(:db) %>
+ created_at: <%= i.minute.ago.to_fs(:db) %>
+ modified_at: <%= i.minute.ago.to_fs(:db) %>
<% end %>
# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/container_requests.yml b/services/api/test/fixtures/container_requests.yml
index dca89f56d3..71c7a54df3 100644
--- a/services/api/test/fixtures/container_requests.yml
+++ b/services/api/test/fixtures/container_requests.yml
@@ -8,9 +8,9 @@ queued:
name: queued
state: Committed
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -32,9 +32,9 @@ running:
name: running
state: Committed
priority: 501
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -55,9 +55,9 @@ requester_for_running:
name: requester_for_running_cr
state: Committed
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 2.minute.ago.to_s(:db) %>
- modified_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 2.minute.ago.to_fs(:db) %>
+ modified_at: <%= 2.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -102,9 +102,9 @@ completed:
name: completed container request
state: Final
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -124,7 +124,7 @@ completed-older:
name: completed
state: Final
priority: 1
- created_at: <%= 30.minute.ago.to_s(:db) %>
+ created_at: <%= 30.minute.ago.to_fs(:db) %>
updated_at: 2016-01-11 11:11:11.111111111 Z
modified_at: 2016-01-11 11:11:11.111111111 Z
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -413,7 +413,7 @@ cr_for_requester2:
name: requester_cr2
state: Final
priority: 1
- created_at: <%= 30.minute.ago.to_s(:db) %>
+ created_at: <%= 30.minute.ago.to_fs(:db) %>
updated_at: 2016-01-11 11:11:11.111111111 Z
modified_at: 2016-01-11 11:11:11.111111111 Z
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -535,13 +535,13 @@ canceled_with_running_container:
running_to_be_deleted:
uuid: zzzzz-xvhdp-cr5runningcntnr
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ owner_uuid: zzzzz-j7d0g-rew6elm53kancon
name: running to be deleted
state: Committed
priority: 1
- created_at: <%= 2.days.ago.to_s(:db) %>
- updated_at: <%= 1.days.ago.to_s(:db) %>
- modified_at: <%= 1.days.ago.to_s(:db) %>
+ created_at: <%= 2.days.ago.to_fs(:db) %>
+ updated_at: <%= 1.days.ago.to_fs(:db) %>
+ modified_at: <%= 1.days.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -562,9 +562,9 @@ completed_with_input_mounts:
name: completed container request
state: Final
priority: 1
- created_at: <%= 24.hour.ago.to_s(:db) %>
- updated_at: <%= 24.hour.ago.to_s(:db) %>
- modified_at: <%= 24.hour.ago.to_s(:db) %>
+ created_at: <%= 24.hour.ago.to_fs(:db) %>
+ updated_at: <%= 24.hour.ago.to_fs(:db) %>
+ modified_at: <%= 24.hour.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -598,9 +598,9 @@ uncommitted:
uuid: zzzzz-xvhdp-cr4uncommittedc
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
name: uncommitted
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
command: ["arvados-cwl-runner", "--local", "--api=containers",
"/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
@@ -1019,9 +1019,9 @@ cr_in_trashed_project:
name: completed container request
state: Final
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -1041,9 +1041,9 @@ runtime_token:
name: queued
state: Committed
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- modified_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ modified_at: <%= 1.minute.ago.to_fs(:db) %>
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
container_image: test
cwd: test
@@ -1065,7 +1065,7 @@ runtime_token:
<% for i in 1..60 do %>
cr_<%=i%>_of_60:
uuid: zzzzz-xvhdp-oneof60crs<%= i.to_s.rjust(5, '0') %>
- created_at: <%= ((i+5)/5).hour.ago.to_s(:db) %>
+ created_at: <%= ((i+5)/5).hour.ago.to_fs(:db) %>
owner_uuid: zzzzz-j7d0g-nnncrspipelines
name: cr-<%= i.to_s %>
output_path: test
diff --git a/services/api/test/fixtures/containers.yml b/services/api/test/fixtures/containers.yml
index a61fb07177..46bc1e50f9 100644
--- a/services/api/test/fixtures/containers.yml
+++ b/services/api/test/fixtures/containers.yml
@@ -33,9 +33,9 @@ running:
owner_uuid: zzzzz-tpzed-000000000000000
state: Running
priority: 12
- created_at: <%= 1.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- started_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ started_at: <%= 1.minute.ago.to_fs(:db) %>
container_image: test
cwd: /tmp
output_path: /tmp
@@ -59,9 +59,9 @@ running_older:
owner_uuid: zzzzz-tpzed-000000000000000
state: Running
priority: 1
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 2.minute.ago.to_s(:db) %>
- started_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 2.minute.ago.to_fs(:db) %>
+ started_at: <%= 2.minute.ago.to_fs(:db) %>
container_image: test
cwd: /tmp
output_path: /tmp
@@ -81,10 +81,10 @@ locked:
owner_uuid: zzzzz-tpzed-000000000000000
state: Locked
locked_by_uuid: zzzzz-gj3su-k9dvestay1plssr
- priority: 2
- created_at: <%= 2.minute.ago.to_s(:db) %>
- updated_at: <%= 2.minute.ago.to_s(:db) %>
- modified_at: <%= 2.minute.ago.to_s(:db) %>
+ priority: 0
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ updated_at: <%= 2.minute.ago.to_fs(:db) %>
+ modified_at: <%= 2.minute.ago.to_fs(:db) %>
container_image: test
cwd: test
output_path: test
@@ -170,6 +170,7 @@ diagnostics_completed_requester:
]
runtime_constraints:
API: true
+ keep_cache_disk: 0
keep_cache_ram: 268435456
ram: 1342177280
vcpus: 1
@@ -195,6 +196,7 @@ diagnostics_completed_hasher1:
]
runtime_constraints:
API: true
+ keep_cache_disk: 0
keep_cache_ram: 268435456
ram: 268435456
vcpus: 1
@@ -220,6 +222,7 @@ diagnostics_completed_hasher2:
]
runtime_constraints:
API: true
+ keep_cache_disk: 0
keep_cache_ram: 268435456
ram: 268435456
vcpus: 2
@@ -245,6 +248,7 @@ diagnostics_completed_hasher3:
]
runtime_constraints:
API: true
+ keep_cache_disk: 0
keep_cache_ram: 268435456
ram: 268435456
vcpus: 1
@@ -281,6 +285,7 @@ diagnostics_completed_requester2:
]
runtime_constraints:
API: true
+ keep_cache_disk: 0
keep_cache_ram: 268435456
ram: 1342177280
vcpus: 1
@@ -348,8 +353,8 @@ ancient_container_with_logs:
state: Complete
exit_code: 0
priority: 1
- created_at: <%= 2.year.ago.to_s(:db) %>
- updated_at: <%= 2.year.ago.to_s(:db) %>
+ created_at: <%= 2.year.ago.to_fs(:db) %>
+ updated_at: <%= 2.year.ago.to_fs(:db) %>
container_image: test
cwd: test
output_path: test
@@ -357,7 +362,7 @@ ancient_container_with_logs:
runtime_constraints:
ram: 12000000000
vcpus: 4
- finished_at: <%= 2.year.ago.to_s(:db) %>
+ finished_at: <%= 2.year.ago.to_fs(:db) %>
log: ea10d51bcf88862dbcc36eb292017dfd+45
output: test
secret_mounts: {}
@@ -369,8 +374,8 @@ previous_container_with_logs:
state: Complete
exit_code: 0
priority: 1
- created_at: <%= 1.month.ago.to_s(:db) %>
- updated_at: <%= 1.month.ago.to_s(:db) %>
+ created_at: <%= 1.month.ago.to_fs(:db) %>
+ updated_at: <%= 1.month.ago.to_fs(:db) %>
container_image: test
cwd: test
output_path: test
@@ -378,7 +383,7 @@ previous_container_with_logs:
runtime_constraints:
ram: 12000000000
vcpus: 4
- finished_at: <%= 1.month.ago.to_s(:db) %>
+ finished_at: <%= 1.month.ago.to_fs(:db) %>
log: ea10d51bcf88862dbcc36eb292017dfd+45
output: test
secret_mounts: {}
@@ -389,8 +394,8 @@ running_container_with_logs:
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
state: Running
priority: 1
- created_at: <%= 1.hour.ago.to_s(:db) %>
- updated_at: <%= 1.hour.ago.to_s(:db) %>
+ created_at: <%= 1.hour.ago.to_fs(:db) %>
+ updated_at: <%= 1.hour.ago.to_fs(:db) %>
container_image: test
cwd: test
output_path: test
@@ -411,9 +416,9 @@ running_to_be_deleted:
owner_uuid: zzzzz-tpzed-000000000000000
state: Running
priority: 1
- created_at: <%= 1.minute.ago.to_s(:db) %>
- updated_at: <%= 1.minute.ago.to_s(:db) %>
- started_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
+ updated_at: <%= 1.minute.ago.to_fs(:db) %>
+ started_at: <%= 1.minute.ago.to_fs(:db) %>
container_image: test
cwd: test
output_path: test
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
index 9a2dc169b6..9034ac6ee7 100644
--- a/services/api/test/fixtures/groups.yml
+++ b/services/api/test/fixtures/groups.yml
@@ -172,6 +172,17 @@ afiltergroup5:
properties:
filters: [["collections.properties.listprop","contains","elem1"],["uuid", "is_a", "arvados#collection"]]
+fuse_filters_test_project:
+ uuid: zzzzz-j7d0g-fusefiltertest1
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2024-02-09T12:00:00Z
+ modified_at: 2024-02-09T12:00:01Z
+ updated_at: 2024-02-09T12:00:01Z
+ name: FUSE Filters Test Project 1
+ group_class: project
+
future_project_viewing_group:
uuid: zzzzz-j7d0g-futrprojviewgrp
owner_uuid: zzzzz-tpzed-000000000000000
diff --git a/services/api/test/fixtures/job_tasks.yml b/services/api/test/fixtures/job_tasks.yml
index 7131da6f5e..6a857a02f2 100644
--- a/services/api/test/fixtures/job_tasks.yml
+++ b/services/api/test/fixtures/job_tasks.yml
@@ -5,11 +5,11 @@
running_job_task_1:
uuid: zzzzz-ot0gb-runningjobtask1
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
job_uuid: zzzzz-8i9sb-with2components
running_job_task_2:
uuid: zzzzz-ot0gb-runningjobtask2
owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
- created_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
job_uuid: zzzzz-8i9sb-with2components
diff --git a/services/api/test/fixtures/jobs.yml b/services/api/test/fixtures/jobs.yml
index 9280aeab93..54b38259ba 100644
--- a/services/api/test/fixtures/jobs.yml
+++ b/services/api/test/fixtures/jobs.yml
@@ -8,8 +8,8 @@ running:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 2.7.minute.ago.to_s(:db) %>
- started_at: <%= 2.7.minute.ago.to_s(:db) %>
+ created_at: <%= 2.7.minute.ago.to_fs(:db) %>
+ started_at: <%= 2.7.minute.ago.to_fs(:db) %>
finished_at: ~
script: hash
repository: active/foo
@@ -32,11 +32,11 @@ running:
running_cancelled:
uuid: zzzzz-8i9sb-4cf0nhn6xte809j
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+ cancelled_at: <%= 1.minute.ago.to_fs(:db) %>
cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- created_at: <%= 4.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
finished_at: ~
script: hash
repository: active/foo
@@ -63,9 +63,9 @@ uses_nonexistent_script_version:
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
- created_at: <%= 5.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
- finished_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 5.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
+ finished_at: <%= 2.minute.ago.to_fs(:db) %>
script: hash
repository: active/foo
running: false
@@ -94,9 +94,9 @@ foobar:
script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
script_parameters:
input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- created_at: <%= 4.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
- finished_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
+ finished_at: <%= 2.minute.ago.to_fs(:db) %>
running: false
success: true
output: fa7aeb5140e2848d39b416daeef4ffc5+45
@@ -122,9 +122,9 @@ barbaz:
script_parameters:
input: fa7aeb5140e2848d39b416daeef4ffc5+45
an_integer: 1
- created_at: <%= 4.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
- finished_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
+ finished_at: <%= 2.minute.ago.to_fs(:db) %>
running: false
success: true
repository: active/foo
@@ -151,9 +151,9 @@ runningbarbaz:
script_parameters:
input: fa7aeb5140e2848d39b416daeef4ffc5+45
an_integer: 1
- created_at: <%= 4.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
- finished_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
+ finished_at: <%= 2.minute.ago.to_fs(:db) %>
running: true
success: ~
repository: active/foo
@@ -172,8 +172,8 @@ runningbarbaz:
previous_job_run:
uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
- created_at: <%= 14.minute.ago.to_s(:db) %>
- finished_at: <%= 13.minutes.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
+ finished_at: <%= 13.minutes.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -189,8 +189,8 @@ previous_job_run:
previous_job_run_nil_log:
uuid: zzzzz-8i9sb-cjs4pklxxjykqq3
- created_at: <%= 14.minute.ago.to_s(:db) %>
- finished_at: <%= 13.minutes.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
+ finished_at: <%= 13.minutes.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -206,8 +206,8 @@ previous_job_run_nil_log:
previous_ancient_job_run:
uuid: zzzzz-8i9sb-ahd7cie8jah9qui
- created_at: <%= 366.days.ago.to_s(:db) %>
- finished_at: <%= 365.days.ago.to_s(:db) %>
+ created_at: <%= 366.days.ago.to_fs(:db) %>
+ finished_at: <%= 365.days.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -223,7 +223,7 @@ previous_ancient_job_run:
previous_docker_job_run:
uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
- created_at: <%= 14.minute.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -242,7 +242,7 @@ previous_docker_job_run:
previous_ancient_docker_image_job_run:
uuid: zzzzz-8i9sb-t3b460aolxxuldl
- created_at: <%= 144.minute.ago.to_s(:db) %>
+ created_at: <%= 144.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -260,7 +260,7 @@ previous_ancient_docker_image_job_run:
previous_job_run_with_arvados_sdk_version:
uuid: zzzzz-8i9sb-eoo0321or2dw2jg
- created_at: <%= 14.minute.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -281,7 +281,7 @@ previous_job_run_with_arvados_sdk_version:
previous_job_run_no_output:
uuid: zzzzz-8i9sb-cjs4pklxxjykppp
- created_at: <%= 14.minute.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash
@@ -297,7 +297,7 @@ previous_job_run_no_output:
previous_job_run_superseded_by_hash_branch:
# This supplied_script_version is a branch name with later commits.
uuid: zzzzz-8i9sb-aeviezu5dahph3e
- created_at: <%= 15.minute.ago.to_s(:db) %>
+ created_at: <%= 15.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/shabranchnames
script: testscript
@@ -311,7 +311,7 @@ previous_job_run_superseded_by_hash_branch:
nondeterminisic_job_run:
uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
- created_at: <%= 14.minute.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: active/foo
script: hash2
@@ -326,14 +326,14 @@ nondeterminisic_job_run:
nearly_finished_job:
uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
- created_at: <%= 14.minute.ago.to_s(:db) %>
+ created_at: <%= 14.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
repository: arvados
script: doesnotexist
script_version: 309e25a64fe994867db8459543af372f850e25b9
script_parameters:
input: b519d9cb706a29fc7ea24dbea2f05851+249025
- started_at: <%= 3.minute.ago.to_s(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
finished_at: ~
running: true
success: ~
@@ -348,7 +348,7 @@ nearly_finished_job:
queued:
uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
cancelled_at: ~
cancelled_by_user_uuid: ~
@@ -382,11 +382,11 @@ job_with_real_log:
cancelled:
uuid: zzzzz-8i9sb-4cf0abc123e809j
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+ cancelled_at: <%= 1.minute.ago.to_fs(:db) %>
cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
- created_at: <%= 4.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
finished_at: ~
script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
running: false
@@ -432,8 +432,8 @@ running_will_be_completed:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 3.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
finished_at: ~
script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
running: true
@@ -499,9 +499,9 @@ job_with_latest_version:
supplied_script_version: main
script_parameters:
input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
- created_at: <%= 3.minute.ago.to_s(:db) %>
- started_at: <%= 2.minute.ago.to_s(:db) %>
- finished_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
+ started_at: <%= 2.minute.ago.to_fs(:db) %>
+ finished_at: <%= 1.minute.ago.to_fs(:db) %>
running: false
success: true
output: fa7aeb5140e2848d39b416daeef4ffc5+45
@@ -544,8 +544,8 @@ completed_job_in_publicly_accessible_project:
log: zzzzz-4zz18-4en62shvi99lxd4
output: b519d9cb706a29fc7ea24dbea2f05851+93
script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
- started_at: <%= 10.minute.ago.to_s(:db) %>
- finished_at: <%= 5.minute.ago.to_s(:db) %>
+ started_at: <%= 10.minute.ago.to_fs(:db) %>
+ finished_at: <%= 5.minute.ago.to_fs(:db) %>
job_in_publicly_accessible_project_but_other_objects_elsewhere:
uuid: zzzzz-8i9sb-jyq01muyhgr4ofj
@@ -568,8 +568,8 @@ running_job_with_components:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 3.minute.ago.to_s(:db) %>
- started_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
+ started_at: <%= 3.minute.ago.to_fs(:db) %>
finished_at: ~
script: hash
repository: active/foo
@@ -599,8 +599,8 @@ running_job_with_components_at_level_1:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
@@ -630,8 +630,8 @@ running_job_with_components_at_level_2:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
@@ -660,8 +660,8 @@ running_job_1_with_components_at_level_3:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
@@ -687,8 +687,8 @@ running_job_2_with_components_at_level_3:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
@@ -715,8 +715,8 @@ running_job_1_with_circular_component_relationship:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
@@ -744,8 +744,8 @@ running_job_2_with_circular_component_relationship:
cancelled_at: ~
cancelled_by_user_uuid: ~
cancelled_by_client_uuid: ~
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
finished_at: ~
repository: active/foo
script: hash
diff --git a/services/api/test/fixtures/keep_disks.yml b/services/api/test/fixtures/keep_disks.yml
index e8424b26fa..5cccf498af 100644
--- a/services/api/test/fixtures/keep_disks.yml
+++ b/services/api/test/fixtures/keep_disks.yml
@@ -7,9 +7,9 @@ nonfull:
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
- last_read_at: <%= 1.minute.ago.to_s(:db) %>
- last_write_at: <%= 2.minute.ago.to_s(:db) %>
- last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+ last_read_at: <%= 1.minute.ago.to_fs(:db) %>
+ last_write_at: <%= 2.minute.ago.to_fs(:db) %>
+ last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
ping_secret: z9xz2tc69dho51g1dmkdy5fnupdhsprahcwxdbjs0zms4eo6i
full:
@@ -17,9 +17,9 @@ full:
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
- last_read_at: <%= 1.minute.ago.to_s(:db) %>
- last_write_at: <%= 2.day.ago.to_s(:db) %>
- last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+ last_read_at: <%= 1.minute.ago.to_fs(:db) %>
+ last_write_at: <%= 2.day.ago.to_fs(:db) %>
+ last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
ping_secret: xx3ieejcufbjy4lli6yt5ig4e8w5l2hhgmbyzpzuq38gri6lj
nonfull2:
@@ -27,7 +27,7 @@ nonfull2:
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
- last_read_at: <%= 1.minute.ago.to_s(:db) %>
- last_write_at: <%= 2.minute.ago.to_s(:db) %>
- last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+ last_read_at: <%= 1.minute.ago.to_fs(:db) %>
+ last_write_at: <%= 2.minute.ago.to_fs(:db) %>
+ last_ping_at: <%= 3.minute.ago.to_fs(:db) %>
ping_secret: 4rs260ibhdum1d242xy23qv320rlerc0j7qg9vyqnchbgmjeek
diff --git a/services/api/test/fixtures/links.yml b/services/api/test/fixtures/links.yml
index b7f1aaa1fa..00d5971534 100644
--- a/services/api/test/fixtures/links.yml
+++ b/services/api/test/fixtures/links.yml
@@ -54,7 +54,7 @@ active_user_member_of_all_users_group:
updated_at: 2014-01-24 20:42:26 -0800
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -110,7 +110,7 @@ spectator_user_member_of_all_users_group:
updated_at: 2014-01-24 20:42:26 -0800
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -124,7 +124,7 @@ inactive_user_member_of_all_users_group:
updated_at: 2013-12-26T20:52:21Z
tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -138,7 +138,7 @@ inactive_signed_ua_user_member_of_all_users_group:
updated_at: 2013-12-26T20:52:21Z
tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -433,7 +433,7 @@ project_viewer_member_of_all_users_group:
updated_at: 2015-07-28T21:34:41.361747000Z
tail_uuid: zzzzz-tpzed-projectviewer1a
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -1044,7 +1044,7 @@ user1-with-load_member_of_all_users_group:
updated_at: 2014-01-24 20:42:26 -0800
tail_uuid: zzzzz-tpzed-user1withloadab
link_class: permission
- name: can_read
+ name: can_write
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
@@ -1139,3 +1139,17 @@ public_favorites_permission_link:
name: can_read
head_uuid: zzzzz-j7d0g-publicfavorites
properties: {}
+
+future_project_user_member_of_all_users_group:
+ uuid: zzzzz-o0j2j-cdnq6627g0h0r2a
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2015-07-28T21:34:41.361747000Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2015-07-28T21:34:41.361747000Z
+ updated_at: 2015-07-28T21:34:41.361747000Z
+ tail_uuid: zzzzz-tpzed-futureprojview2
+ link_class: permission
+ name: can_write
+ head_uuid: zzzzz-j7d0g-fffffffffffffff
+ properties: {}
diff --git a/services/api/test/fixtures/logs.yml b/services/api/test/fixtures/logs.yml
index 25f1efff62..3b41550ae7 100644
--- a/services/api/test/fixtures/logs.yml
+++ b/services/api/test/fixtures/logs.yml
@@ -8,8 +8,8 @@ noop: # nothing happened ...to the 'spectator' user
owner_uuid: zzzzz-tpzed-000000000000000
object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
object_owner_uuid: zzzzz-tpzed-000000000000000
- event_at: <%= 1.minute.ago.to_s(:db) %>
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ event_at: <%= 1.minute.ago.to_fs(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
admin_changes_repository2: # admin changes repository2, which is owned by active user
id: 2
@@ -17,8 +17,8 @@ admin_changes_repository2: # admin changes repository2, which is owned by active
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- created_at: <%= 2.minute.ago.to_s(:db) %>
- event_at: <%= 2.minute.ago.to_s(:db) %>
+ created_at: <%= 2.minute.ago.to_fs(:db) %>
+ event_at: <%= 2.minute.ago.to_fs(:db) %>
event_type: update
admin_changes_specimen: # admin changes specimen owned_by_spectator
@@ -27,8 +27,8 @@ admin_changes_specimen: # admin changes specimen owned_by_spectator
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
- created_at: <%= 3.minute.ago.to_s(:db) %>
- event_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
+ event_at: <%= 3.minute.ago.to_fs(:db) %>
event_type: update
system_adds_foo_file: # foo collection added, readable by active through link
@@ -37,8 +37,8 @@ system_adds_foo_file: # foo collection added, readable by active through link
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: zzzzz-4zz18-znfnqtbbv4spc3w # foo file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
- created_at: <%= 4.minute.ago.to_s(:db) %>
- event_at: <%= 4.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
+ event_at: <%= 4.minute.ago.to_fs(:db) %>
event_type: create
system_adds_baz: # baz collection added, readable by active and spectator through group 'all users' group membership
@@ -47,8 +47,8 @@ system_adds_baz: # baz collection added, readable by active and spectator throug
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: zzzzz-4zz18-y9vne9npefyxh8g # baz file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
- created_at: <%= 5.minute.ago.to_s(:db) %>
- event_at: <%= 5.minute.ago.to_s(:db) %>
+ created_at: <%= 5.minute.ago.to_fs(:db) %>
+ event_at: <%= 5.minute.ago.to_fs(:db) %>
event_type: create
log_owned_by_active:
@@ -57,7 +57,7 @@ log_owned_by_active:
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
- event_at: <%= 2.minute.ago.to_s(:db) %>
+ event_at: <%= 2.minute.ago.to_fs(:db) %>
summary: non-admin use can read own logs
crunchstat_for_running_job:
@@ -162,16 +162,16 @@ stderr_for_ancient_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer01
- event_at: <%= 2.year.ago.to_s(:db) %>
+ event_at: <%= 2.year.ago.to_fs(:db) %>
event_type: stderr
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 2.year.ago.to_s(:db) %>
- updated_at: <%= 2.year.ago.to_s(:db) %>
- modified_at: <%= 2.year.ago.to_s(:db) %>
+ created_at: <%= 2.year.ago.to_fs(:db) %>
+ updated_at: <%= 2.year.ago.to_fs(:db) %>
+ modified_at: <%= 2.year.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
crunchstat_for_ancient_container:
@@ -181,16 +181,16 @@ crunchstat_for_ancient_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer01
- event_at: <%= 2.year.ago.to_s(:db) %>
+ event_at: <%= 2.year.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 2.year.ago.to_s(:db) %>
- updated_at: <%= 2.year.ago.to_s(:db) %>
- modified_at: <%= 2.year.ago.to_s(:db) %>
+ created_at: <%= 2.year.ago.to_fs(:db) %>
+ updated_at: <%= 2.year.ago.to_fs(:db) %>
+ modified_at: <%= 2.year.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
stderr_for_previous_container:
@@ -200,16 +200,16 @@ stderr_for_previous_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer02
- event_at: <%= 1.month.ago.to_s(:db) %>
+ event_at: <%= 1.month.ago.to_fs(:db) %>
event_type: stderr
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 1.month.ago.to_s(:db) %>
- updated_at: <%= 1.month.ago.to_s(:db) %>
- modified_at: <%= 1.month.ago.to_s(:db) %>
+ created_at: <%= 1.month.ago.to_fs(:db) %>
+ updated_at: <%= 1.month.ago.to_fs(:db) %>
+ modified_at: <%= 1.month.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
crunchstat_for_previous_container:
@@ -219,16 +219,16 @@ crunchstat_for_previous_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer02
- event_at: <%= 1.month.ago.to_s(:db) %>
+ event_at: <%= 1.month.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 1.month.ago.to_s(:db) %>
- updated_at: <%= 1.month.ago.to_s(:db) %>
- modified_at: <%= 1.month.ago.to_s(:db) %>
+ created_at: <%= 1.month.ago.to_fs(:db) %>
+ updated_at: <%= 1.month.ago.to_fs(:db) %>
+ modified_at: <%= 1.month.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
stderr_for_running_container:
@@ -238,16 +238,16 @@ stderr_for_running_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer03
- event_at: <%= 1.hour.ago.to_s(:db) %>
+ event_at: <%= 1.hour.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 1.hour.ago.to_s(:db) %>
- updated_at: <%= 1.hour.ago.to_s(:db) %>
- modified_at: <%= 1.hour.ago.to_s(:db) %>
+ created_at: <%= 1.hour.ago.to_fs(:db) %>
+ updated_at: <%= 1.hour.ago.to_fs(:db) %>
+ modified_at: <%= 1.hour.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
crunchstat_for_running_container:
@@ -257,14 +257,14 @@ crunchstat_for_running_container:
modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
object_uuid: zzzzz-dz642-logscontainer03
- event_at: <%= 1.hour.ago.to_s(:db) %>
+ event_at: <%= 1.hour.ago.to_fs(:db) %>
event_type: crunchstat
summary: ~
properties:
text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
0.9900 sys'
- created_at: <%= 1.hour.ago.to_s(:db) %>
- updated_at: <%= 1.hour.ago.to_s(:db) %>
- modified_at: <%= 1.hour.ago.to_s(:db) %>
+ created_at: <%= 1.hour.ago.to_fs(:db) %>
+ updated_at: <%= 1.hour.ago.to_fs(:db) %>
+ modified_at: <%= 1.hour.ago.to_fs(:db) %>
object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
diff --git a/services/api/test/fixtures/nodes.yml b/services/api/test/fixtures/nodes.yml
index 821a6b5e42..d4589ed705 100644
--- a/services/api/test/fixtures/nodes.yml
+++ b/services/api/test/fixtures/nodes.yml
@@ -9,8 +9,8 @@ busy:
slot_number: 0
domain: ""
ip_address: 172.17.2.172
- last_ping_at: <%= 1.minute.ago.to_s(:db) %>
- first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+ last_ping_at: <%= 1.minute.ago.to_fs(:db) %>
+ first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
job_uuid: zzzzz-8i9sb-2gx6rz0pjl033w3 # nearly_finished_job
properties: {}
info:
@@ -24,8 +24,8 @@ down:
slot_number: 1
domain: ""
ip_address: 172.17.2.173
- last_ping_at: <%= 1.hour.ago.to_s(:db) %>
- first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+ last_ping_at: <%= 1.hour.ago.to_fs(:db) %>
+ first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
job_uuid: ~
properties: {}
info:
@@ -38,8 +38,8 @@ idle:
slot_number: 2
domain: ""
ip_address: 172.17.2.174
- last_ping_at: <%= 2.minute.ago.to_s(:db) %>
- first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+ last_ping_at: <%= 2.minute.ago.to_fs(:db) %>
+ first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
job_uuid: ~
info:
ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
@@ -54,8 +54,8 @@ was_idle_now_down:
slot_number: ~
domain: ""
ip_address: 172.17.2.174
- last_ping_at: <%= 1.hour.ago.to_s(:db) %>
- first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+ last_ping_at: <%= 1.hour.ago.to_fs(:db) %>
+ first_ping_at: <%= 23.hour.ago.to_fs(:db) %>
job_uuid: ~
info:
ping_secret: "1bd1yi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
diff --git a/services/api/test/fixtures/pipeline_instances.yml b/services/api/test/fixtures/pipeline_instances.yml
index a504c9fadd..714fc60771 100644
--- a/services/api/test/fixtures/pipeline_instances.yml
+++ b/services/api/test/fixtures/pipeline_instances.yml
@@ -6,19 +6,19 @@ new_pipeline:
state: New
uuid: zzzzz-d1hrv-f4gneyn6br1xize
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
new_pipeline_in_subproject:
state: New
uuid: zzzzz-d1hrv-subprojpipeline
owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
has_component_with_no_script_parameters:
state: Ready
uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 10.minute.ago.to_s(:db) %>
+ created_at: <%= 10.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -29,7 +29,7 @@ has_component_with_empty_script_parameters:
state: Ready
uuid: zzzzz-d1hrv-jq16l10gcsnyumo
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -46,9 +46,9 @@ has_component_with_completed_jobs:
state: Complete
uuid: zzzzz-d1hrv-i3e77t9z5y8j9cc
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 11.minute.ago.to_s(:db) %>
- started_at: <%= 10.minute.ago.to_s(:db) %>
- finished_at: <%= 9.minute.ago.to_s(:db) %>
+ created_at: <%= 11.minute.ago.to_fs(:db) %>
+ started_at: <%= 10.minute.ago.to_fs(:db) %>
+ finished_at: <%= 9.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -57,9 +57,9 @@ has_component_with_completed_jobs:
job:
uuid: zzzzz-8i9sb-rft1xdewxkwgxnz
script_version: main
- created_at: <%= 10.minute.ago.to_s(:db) %>
- started_at: <%= 10.minute.ago.to_s(:db) %>
- finished_at: <%= 9.minute.ago.to_s(:db) %>
+ created_at: <%= 10.minute.ago.to_fs(:db) %>
+ started_at: <%= 10.minute.ago.to_fs(:db) %>
+ finished_at: <%= 9.minute.ago.to_fs(:db) %>
state: Complete
tasks_summary:
failed: 0
@@ -73,8 +73,8 @@ has_component_with_completed_jobs:
job:
uuid: zzzzz-8i9sb-r2dtbzr6bfread7
script_version: main
- created_at: <%= 9.minute.ago.to_s(:db) %>
- started_at: <%= 9.minute.ago.to_s(:db) %>
+ created_at: <%= 9.minute.ago.to_fs(:db) %>
+ started_at: <%= 9.minute.ago.to_fs(:db) %>
state: Running
tasks_summary:
failed: 0
@@ -88,7 +88,7 @@ has_component_with_completed_jobs:
job:
uuid: zzzzz-8i9sb-c7408rni11o7r6s
script_version: main
- created_at: <%= 9.minute.ago.to_s(:db) %>
+ created_at: <%= 9.minute.ago.to_fs(:db) %>
state: Queued
tasks_summary: {}
@@ -97,7 +97,7 @@ has_job:
state: Ready
uuid: zzzzz-d1hrv-1yfj6xkidf2muk3
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 2.9.minute.ago.to_s(:db) %>
+ created_at: <%= 2.9.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -112,7 +112,7 @@ components_is_jobspec:
# Helps test that clients cope with funny-shaped components.
# For an example, see #3321.
uuid: zzzzz-d1hrv-1yfj61234abcdk4
- created_at: <%= 4.minute.ago.to_s(:db) %>
+ created_at: <%= 4.minute.ago.to_fs(:db) %>
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
@@ -132,7 +132,7 @@ pipeline_with_tagged_collection_input:
state: Ready
uuid: zzzzz-d1hrv-1yfj61234abcdk3
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.2.minute.ago.to_s(:db) %>
+ created_at: <%= 3.2.minute.ago.to_fs(:db) %>
components:
part-one:
script_parameters:
@@ -145,7 +145,7 @@ pipeline_to_merge_params:
uuid: zzzzz-d1hrv-1yfj6dcba4321k3
pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 3.3.minute.ago.to_s(:db) %>
+ created_at: <%= 3.3.minute.ago.to_fs(:db) %>
components:
part-one:
script_parameters:
@@ -260,7 +260,7 @@ pipeline_in_publicly_accessible_project:
name: Pipeline in publicly accessible project
pipeline_template_uuid: zzzzz-p5p6p-tmpltpublicproj
state: Complete
- created_at: <%= 30.minute.ago.to_s(:db) %>
+ created_at: <%= 30.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -363,8 +363,8 @@ pipeline_in_running_state:
name: running_with_job
uuid: zzzzz-d1hrv-runningpipeline
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 2.8.minute.ago.to_s(:db) %>
- started_at: <%= 2.8.minute.ago.to_s(:db) %>
+ created_at: <%= 2.8.minute.ago.to_fs(:db) %>
+ started_at: <%= 2.8.minute.ago.to_fs(:db) %>
state: RunningOnServer
components:
foo:
@@ -379,7 +379,7 @@ running_pipeline_with_complete_job:
uuid: zzzzz-d1hrv-partdonepipelin
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
state: RunningOnServer
- created_at: <%= 15.minute.ago.to_s(:db) %>
+ created_at: <%= 15.minute.ago.to_fs(:db) %>
components:
previous:
job:
@@ -393,9 +393,9 @@ complete_pipeline_with_two_jobs:
uuid: zzzzz-d1hrv-twodonepipeline
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
state: Complete
- created_at: <%= 2.5.minute.ago.to_s(:db) %>
- started_at: <%= 2.minute.ago.to_s(:db) %>
- finished_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 2.5.minute.ago.to_fs(:db) %>
+ started_at: <%= 2.minute.ago.to_fs(:db) %>
+ finished_at: <%= 1.minute.ago.to_fs(:db) %>
components:
ancient:
job:
@@ -409,7 +409,7 @@ complete_pipeline_with_two_jobs:
failed_pipeline_with_two_jobs:
uuid: zzzzz-d1hrv-twofailpipeline
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 55.minute.ago.to_s(:db) %>
+ created_at: <%= 55.minute.ago.to_fs(:db) %>
state: Failed
components:
ancient:
@@ -426,8 +426,8 @@ job_child_pipeline_with_components_at_level_2:
state: RunningOnServer
uuid: zzzzz-d1hrv-picomponentsl02
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -436,8 +436,8 @@ job_child_pipeline_with_components_at_level_2:
job:
uuid: zzzzz-8i9sb-job1atlevel3noc
script_version: main
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
state: Running
tasks_summary:
failed: 0
@@ -451,8 +451,8 @@ job_child_pipeline_with_components_at_level_2:
job:
uuid: zzzzz-8i9sb-job2atlevel3noc
script_version: main
- created_at: <%= 12.hour.ago.to_s(:db) %>
- started_at: <%= 12.hour.ago.to_s(:db) %>
+ created_at: <%= 12.hour.ago.to_fs(:db) %>
+ started_at: <%= 12.hour.ago.to_fs(:db) %>
state: Running
tasks_summary:
failed: 0
@@ -470,9 +470,9 @@ pipeline_<%=i%>_of_10:
name: pipeline_<%= i %>
uuid: zzzzz-d1hrv-10pipelines0<%= i.to_s.rjust(3, '0') %>
owner_uuid: zzzzz-j7d0g-000010pipelines
- created_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
- started_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
- finished_at: <%= (i-1).minute.ago.to_s(:db) %>
+ created_at: <%= (2*(i-1)).hour.ago.to_fs(:db) %>
+ started_at: <%= (2*(i-1)).hour.ago.to_fs(:db) %>
+ finished_at: <%= (i-1).minute.ago.to_fs(:db) %>
state: Failed
components:
foo:
@@ -494,7 +494,7 @@ pipeline_<%=i%>_of_2_pipelines_and_60_crs:
state: New
uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
owner_uuid: zzzzz-j7d0g-nnncrspipelines
- created_at: <%= i.minute.ago.to_s(:db) %>
+ created_at: <%= i.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
@@ -513,9 +513,9 @@ pipeline_<%=i%>_of_25:
state: Failed
uuid: zzzzz-d1hrv-25pipelines0<%= i.to_s.rjust(3, '0') %>
owner_uuid: zzzzz-j7d0g-000025pipelines
- created_at: <%= i.hour.ago.to_s(:db) %>
- started_at: <%= i.hour.ago.to_s(:db) %>
- finished_at: <%= i.minute.ago.to_s(:db) %>
+ created_at: <%= i.hour.ago.to_fs(:db) %>
+ started_at: <%= i.hour.ago.to_fs(:db) %>
+ finished_at: <%= i.minute.ago.to_fs(:db) %>
components:
foo:
script: foo
diff --git a/services/api/test/fixtures/workflows.yml b/services/api/test/fixtures/workflows.yml
index 29b76abb45..ad9c7d2676 100644
--- a/services/api/test/fixtures/workflows.yml
+++ b/services/api/test/fixtures/workflows.yml
@@ -28,7 +28,7 @@ workflow_with_input_specifications:
owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
name: Workflow with input specifications
description: this workflow has inputs specified
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
definition: |
cwlVersion: v1.0
class: CommandLineTool
@@ -54,7 +54,7 @@ workflow_with_input_defaults:
owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
name: Workflow with default input specifications
description: this workflow has inputs specified
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
definition: |
cwlVersion: v1.0
class: CommandLineTool
@@ -73,7 +73,7 @@ workflow_with_wrr:
owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
name: Workflow with WorkflowRunnerResources
description: this workflow has WorkflowRunnerResources
- created_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_fs(:db) %>
definition: |
cwlVersion: v1.0
class: CommandLineTool
diff --git a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
index 9c70f6f417..60b4133f9a 100644
--- a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
@@ -199,6 +199,19 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
assert_not_empty(json_response['uuid'])
end
+ [
+ :active_noscope,
+ :active_all_collections,
+ :active_userlist,
+ :foo_collection_sharing_token,
+ ].each do |auth|
+ test "#{auth} can get current token without the appropriate scope" do
+ authorize_with auth
+ get :current
+ assert_response :success
+ end
+ end
+
test "get current token, no auth" do
get :current
assert_response 401
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
index af11715982..43797035bc 100644
--- a/services/api/test/functional/arvados/v1/collections_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/collections_controller_test.rb
@@ -374,6 +374,24 @@ EOS
"Expected 'duplicate key' error in #{response_errors.first}")
end
+ [false, true].each do |ensure_unique_name|
+ test "create failure with duplicate name, ensure_unique_name #{ensure_unique_name}" do
+ authorize_with :active
+ post :create, params: {
+ collection: {
+ owner_uuid: users(:active).uuid,
+ manifest_text: "",
+ name: "this...............................................................................................................................................................................................................................................................name is too long"
+ },
+ ensure_unique_name: ensure_unique_name
+ }
+ assert_response 422
+ # check the real error isn't masked by an
+ # ensure_unique_name-related error (#19698)
+ assert_match /value too long for type/, json_response['errors'][0]
+ end
+ end
+
[false, true].each do |unsigned|
test "create with duplicate name, ensure_unique_name, unsigned=#{unsigned}" do
permit_unsigned_manifests unsigned
@@ -391,7 +409,7 @@ EOS
ensure_unique_name: true
}
assert_response :success
- assert_match /^owned_by_active \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name']
+ assert_match /^owned_by_active \(#{json_response['uuid'][-15..-1]}\)$/, json_response['name']
end
end
@@ -1204,6 +1222,20 @@ EOS
assert_nil json_response['trash_at']
end
+ test 'untrash a trashed collection by assigning nil to trash_at' do
+ authorize_with :active
+ post :update, params: {
+ id: collections(:expired_collection).uuid,
+ collection: {
+ trash_at: nil,
+ },
+ include_trash: true,
+ }
+ assert_response 200
+ assert_equal false, json_response['is_trashed']
+ assert_nil json_response['trash_at']
+ end
+
test 'untrash error on not trashed collection' do
authorize_with :active
post :untrash, params: {
@@ -1253,7 +1285,7 @@ EOS
assert_equal false, json_response['is_trashed']
assert_nil json_response['trash_at']
assert_nil json_response['delete_at']
- assert_match /^same name for trashed and persisted collections \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name']
+ assert_match /^same name for trashed and persisted collections \(#{json_response['uuid'][-15..-1]}\)$/, json_response['name']
end
test 'cannot show collection in trashed subproject' do
diff --git a/services/api/test/functional/arvados/v1/container_requests_controller_test.rb b/services/api/test/functional/arvados/v1/container_requests_controller_test.rb
index e99af39c9c..87eb37cde7 100644
--- a/services/api/test/functional/arvados/v1/container_requests_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/container_requests_controller_test.rb
@@ -8,8 +8,9 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
def minimal_cr
{
command: ['echo', 'hello'],
- container_image: 'test',
+ container_image: 'arvados/apitestfixture:latest',
output_path: 'test',
+ runtime_constraints: {vcpus: 1, ram: 1}
}
end
@@ -18,7 +19,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
sp = {'partitions' => ['test1', 'test2']}
post :create, params: {
- container_request: minimal_cr.merge(scheduling_parameters: sp.dup)
+ container_request: minimal_cr.merge(scheduling_parameters: sp.dup, state: "Committed")
}
assert_response :success
@@ -26,6 +27,20 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
assert_not_nil cr, 'Expected container request'
assert_equal sp['partitions'], cr['scheduling_parameters']['partitions']
assert_equal false, cr['scheduling_parameters']['preemptible']
+ assert_equal false, cr['scheduling_parameters']['supervisor']
+ end
+
+ test 'create a-c-r should be supervisor' do
+ authorize_with :active
+
+ post :create, params: {
+ container_request: minimal_cr.merge(command: ["arvados-cwl-runner", "my-workflow.cwl"], state: "Committed")
+ }
+ assert_response :success
+
+ cr = JSON.parse(@response.body)
+ assert_not_nil cr, 'Expected container request'
+ assert_equal true, cr['scheduling_parameters']['supervisor']
end
test "secret_mounts not in #create responses" do
@@ -88,7 +103,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
test "update without deleting secret_mounts" do
authorize_with :active
req = container_requests(:uncommitted)
- req.update_attributes!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})
+ req.update!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})
patch :update, params: {
id: req.uuid,
@@ -154,7 +169,7 @@ class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
test "filter on container subproperty runtime_status[foo] = bar" do
ctr = containers(:running)
act_as_system_user do
- ctr.update_attributes!(runtime_status: {foo: 'bar'})
+ ctr.update!(runtime_status: {foo: 'bar'})
end
authorize_with :active
get :index, params: {
diff --git a/services/api/test/functional/arvados/v1/containers_controller_test.rb b/services/api/test/functional/arvados/v1/containers_controller_test.rb
index 5b8ec0f638..07fa5c3211 100644
--- a/services/api/test/functional/arvados/v1/containers_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/containers_controller_test.rb
@@ -160,4 +160,33 @@ class Arvados::V1::ContainersControllerTest < ActionController::TestCase
assert_equal "v2/#{json_response['uuid']}/#{json_response['api_token']}", api_client_authorizations(:container_runtime_token).token
assert_equal 'arvados#apiClientAuthorization', json_response['kind']
end
+
+ test 'update_priority' do
+ ActiveRecord::Base.connection.execute "update containers set priority=0 where uuid='#{containers(:running).uuid}'"
+ authorize_with :admin
+ post :update_priority, params: {id: containers(:running).uuid}
+ assert_response :success
+ assert_not_equal 0, Container.find_by_uuid(containers(:running).uuid).priority
+ end
+
+ test 'update runtime_status, runtime_status is toplevel key' do
+ authorize_with :dispatch1
+ c = containers(:running)
+ patch :update, params: {id: containers(:running).uuid, runtime_status: {activity: "foo", activityDetail: "bar"}}
+ assert_response :success
+ end
+
+ test 'update runtime_status, container is toplevel key' do
+ authorize_with :dispatch1
+ c = containers(:running)
+ patch :update, params: {id: containers(:running).uuid, container: {runtime_status: {activity: "foo", activityDetail: "bar"}}}
+ assert_response :success
+ end
+
+ test 'update state, state is toplevel key' do
+ authorize_with :dispatch1
+ c = containers(:running)
+ patch :update, params: {id: containers(:running).uuid, state: "Complete", runtime_status: {activity: "finishing"}}
+ assert_response :success
+ end
end
diff --git a/services/api/test/functional/arvados/v1/filters_test.rb b/services/api/test/functional/arvados/v1/filters_test.rb
index 3916d63c5e..5d343314ce 100644
--- a/services/api/test/functional/arvados/v1/filters_test.rb
+++ b/services/api/test/functional/arvados/v1/filters_test.rb
@@ -39,6 +39,41 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
assert_match(/no longer supported/, json_response['errors'].join(' '))
end
+ test 'error message for int64 overflow' do
+ # some versions of ActiveRecord cast >64-bit ints to postgres
+ # numeric type, but this is never useful because database content
+ # is 64 bit.
+ @controller = Arvados::V1::LogsController.new
+ authorize_with :active
+ get :index, params: {
+ filters: [['id', '=', 123412341234123412341234]],
+ }
+ assert_response 422
+ assert_match(/Invalid operand .* integer attribute/, json_response['errors'].join(' '))
+ end
+
+ ['in', 'not in'].each do |operator|
+ test "error message for int64 overflow ('#{operator}' filter)" do
+ @controller = Arvados::V1::ContainerRequestsController.new
+ authorize_with :active
+ get :index, params: {
+ filters: [['priority', operator, [9, 123412341234123412341234]]],
+ }
+ assert_response 422
+ assert_match(/Invalid element .* integer attribute/, json_response['errors'].join(' '))
+ end
+ end
+
+ test 'error message for invalid boolean operand' do
+ @controller = Arvados::V1::GroupsController.new
+ authorize_with :active
+ get :index, params: {
+ filters: [['is_trashed', '=', 'fourty']],
+ }
+ assert_response 422
+ assert_match(/Invalid operand .* boolean attribute/, json_response['errors'].join(' '))
+ end
+
test 'api responses provide timestamps with nanoseconds' do
@controller = Arvados::V1::CollectionsController.new
authorize_with :active
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index cfcb33d40a..ee7f716c80 100644
--- a/services/api/test/functional/arvados/v1/groups_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb
@@ -330,6 +330,38 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
assert_equal 0, json_response['items'].count
end
+ test 'get group-owned objects with select' do
+ authorize_with :active
+ get :contents, params: {
+ id: groups(:aproject).uuid,
+ limit: 100,
+ format: :json,
+ select: ["uuid", "storage_classes_desired"]
+ }
+ assert_response :success
+ assert_equal 17, json_response['items_available']
+ assert_equal 17, json_response['items'].count
+ json_response['items'].each do |item|
+ # Expect collections to have a storage_classes field, other items should not.
+ if item["kind"] == "arvados#collection"
+ assert !item["storage_classes_desired"].nil?
+ else
+ assert item["storage_classes_desired"].nil?
+ end
+ end
+ end
+
+ test 'get group-owned objects with invalid field in select' do
+ authorize_with :active
+ get :contents, params: {
+ id: groups(:aproject).uuid,
+ limit: 100,
+ format: :json,
+ select: ["uuid", "storage_classes_desire"]
+ }
+ assert_response 422
+ end
+
test 'get group-owned objects with additional filter matching nothing' do
authorize_with :active
get :contents, params: {
@@ -442,7 +474,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
assert_not_equal(new_project['uuid'],
groups(:aproject).uuid,
"create returned same uuid as existing project")
- assert_match(/^A Project \(\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{3}Z\)$/,
+ assert_match(/^A Project \(#{new_project['uuid'][-15..-1]}\)$/,
new_project['name'])
end
@@ -768,7 +800,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
ensure_unique_name: true
}
assert_response :success
- assert_match /^trashed subproject 3 \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name']
+ assert_match /^trashed subproject 3 \(#{json_response['uuid'][-15..-1]}\)$/, json_response['name']
end
test "move trashed subproject to new owner #{auth}" do
@@ -952,7 +984,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
innertrash = Collection.create!(name: 'inner-trashed', owner_uuid: innerproj.uuid, trash_at: trashtime)
innertrashproj = Group.create!(group_class: 'project', name: 'inner-trashed-proj', owner_uuid: innerproj.uuid, trash_at: trashtime)
outertrash = Collection.create!(name: 'outer-trashed', owner_uuid: outerproj.uuid, trash_at: trashtime)
- innerproj.update_attributes!(frozen_by_uuid: users(:active).uuid)
+ innerproj.update!(frozen_by_uuid: users(:active).uuid)
get :contents, params: {id: outerproj.uuid, include_trash: true, recursive: true}
assert_response :success
uuids = json_response['items'].collect { |item| item['uuid'] }
diff --git a/services/api/test/functional/arvados/v1/management_controller_test.rb b/services/api/test/functional/arvados/v1/management_controller_test.rb
index 6d27bccfc4..d8d2d52c89 100644
--- a/services/api/test/functional/arvados/v1/management_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/management_controller_test.rb
@@ -39,7 +39,8 @@ class Arvados::V1::ManagementControllerTest < ActionController::TestCase
@request.headers['Authorization'] = "Bearer configuredmanagementtoken"
get :metrics
assert_response :success
- assert_equal 'text/plain', @response.content_type
+ assert_equal 'text/plain', @response.media_type
+ assert_equal 'utf-8', @response.charset
assert_match /\narvados_config_source_timestamp_seconds{sha256="#{hash}"} #{Regexp.escape mtime.utc.to_f.to_s}\n/, @response.body
diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
index c61a57ecc8..47f6c5ff3f 100644
--- a/services/api/test/functional/arvados/v1/nodes_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
@@ -211,17 +211,6 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase
assert_response 403
end
- test "job readable after updating other attributes" do
- authorize_with :admin
- post :update, params: {
- id: nodes(:busy).uuid,
- node: {last_ping_at: 1.second.ago},
- }
- assert_response :success
- assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"],
- "mismatched job UUID after ping update")
- end
-
test "node should fail ping with invalid hostname config format" do
Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%04' # should end with "04d"
post :ping, params: {
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 89feecb454..65a2b64b8a 100644
--- a/services/api/test/functional/arvados/v1/schema_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb
@@ -9,7 +9,6 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
setup do forget end
teardown do forget end
def forget
- Rails.cache.delete 'arvados_v1_rest_discovery'
AppVersion.forget
end
@@ -84,7 +83,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
- assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include', 'include_old_versions']).sort
+ assert_equal group_contents_params.keys.sort, (group_index_params.keys + ['uuid', 'recursive', 'include', 'include_old_versions']).sort
recursive_param = group_contents_params['recursive']
assert_equal 'boolean', recursive_param['type']
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index 6a7b00a005..cc0b5e1320 100644
--- a/services/api/test/functional/arvados/v1/users_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/users_controller_test.rb
@@ -68,7 +68,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
test "respond 401 if given token exists but user record is missing" do
authorize_with :valid_token_deleted_user
- get :current, {format: :json}
+ get :current, format: :json
assert_response 401
end
@@ -151,7 +151,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
"foo/#{repo_name}", created['uuid'], 'arvados#repository', true, 'Repository'
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -335,7 +335,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
# two extra links; system_group, and group
verify_links_added 2
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', response_object['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
@@ -420,7 +420,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -458,7 +458,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -511,7 +511,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_equal active_user[:email], created['email'], 'expected input email'
# verify links
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
@@ -545,7 +545,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
assert_equal active_user['email'], created['email'], 'expected original email'
# verify links
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
assert_equal(repos_count, repos_query.count)
@@ -666,7 +666,7 @@ The Arvados team.
assert_equal active_user['uuid'], json_response['uuid']
updated = User.where(uuid: active_user['uuid']).first
assert_equal(true, updated.is_active)
- assert_equal({read: true}, updated.group_permissions[all_users_group_uuid])
+ assert_equal({read: true, write: true}, updated.group_permissions[all_users_group_uuid])
end
test "non-admin user can get basic information about readable users" do
@@ -889,7 +889,7 @@ The Arvados team.
['dst', :project_viewer_trustedclient]].each do |which_scoped, auth|
test "refuse to merge with scoped #{which_scoped} token" do
act_as_system_user do
- api_client_authorizations(auth).update_attributes(scopes: ["GET /", "POST /", "PUT /"])
+ api_client_authorizations(auth).update(scopes: ["GET /", "POST /", "PUT /"])
end
authorize_with(:active_trustedclient)
post(:merge, params: {
@@ -1043,12 +1043,16 @@ The Arvados team.
existinguuid = 'remot-tpzed-foobarbazwazqux'
newuuid = 'remot-tpzed-newnarnazwazqux'
unchanginguuid = 'remot-tpzed-nochangingattrs'
+ conflictinguuid1 = 'remot-tpzed-conflictingnam1'
+ conflictinguuid2 = 'remot-tpzed-conflictingnam2'
act_as_system_user do
User.create!(uuid: existinguuid, email: 'root@existing.example.com')
User.create!(uuid: unchanginguuid, email: 'root@unchanging.example.com', prefs: {'foo' => {'bar' => 'baz'}})
end
assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
+ Rails.configuration.Login.LoginCluster = 'remot'
+
authorize_with(:admin)
patch(:batch_update,
params: {
@@ -1059,15 +1063,28 @@ The Arvados team.
'is_active' => true,
'is_admin' => true,
'prefs' => {'foo' => 'bar'},
+ 'is_invited' => true
},
newuuid => {
'first_name' => 'noot',
'email' => 'root@remot.example.com',
'username' => '',
+ 'is_invited' => true
},
unchanginguuid => {
'email' => 'root@unchanging.example.com',
'prefs' => {'foo' => {'bar' => 'baz'}},
+ 'is_invited' => true
+ },
+ conflictinguuid1 => {
+ 'email' => 'root@conflictingname1.example.com',
+ 'username' => 'active',
+ 'is_invited' => true
+ },
+ conflictinguuid2 => {
+ 'email' => 'root@conflictingname2.example.com',
+ 'username' => 'federatedactive',
+ 'is_invited' => true
},
}})
assert_response(:success)
@@ -1084,7 +1101,38 @@ The Arvados team.
assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
end
- NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
+ test 'batch update does not produce spurious log events' do
+ # test for bug #21304
+
+ existinguuid = 'remot-tpzed-foobarbazwazqux'
+ act_as_system_user do
+ User.create!(uuid: existinguuid,
+ first_name: 'root',
+ is_active: true,
+ )
+ end
+ assert_equal(1, Log.where(object_uuid: existinguuid).count)
+
+ Rails.configuration.Login.LoginCluster = 'remot'
+
+ authorize_with(:admin)
+ patch(:batch_update,
+ params: {
+ updates: {
+ existinguuid => {
+ 'first_name' => 'root',
+ 'email' => '',
+ 'username' => '',
+ 'is_active' => true,
+ 'is_invited' => true
+ },
+ }})
+ assert_response(:success)
+
+ assert_equal(1, Log.where(object_uuid: existinguuid).count)
+ end
+
+ NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "is_admin", "is_invited", "email", "first_name",
"last_name", "username", "can_write", "can_manage"].sort
def check_non_admin_index
diff --git a/services/api/test/functional/user_sessions_controller_test.rb b/services/api/test/functional/user_sessions_controller_test.rb
index 66aff787bd..cf4c6e8b4d 100644
--- a/services/api/test/functional/user_sessions_controller_test.rb
+++ b/services/api/test/functional/user_sessions_controller_test.rb
@@ -6,124 +6,30 @@ require 'test_helper'
class UserSessionsControllerTest < ActionController::TestCase
- test "redirect to joshid" do
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- # Not supported any more
- assert_response 404
- end
-
- test "send token when user is already logged in" do
- authorize_with :inactive
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- assert_response :redirect
- assert_equal(0, @response.redirect_url.index(api_client_page + '?'),
- 'Redirect url ' + @response.redirect_url +
- ' should start with ' + api_client_page + '?')
- assert_not_nil assigns(:api_client)
- end
-
- test "login creates token without expiration by default" do
- assert_equal Rails.configuration.Login.TokenLifetime, 0
- authorize_with :inactive
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- assert_response :redirect
- assert_not_nil assigns(:api_client)
- assert_nil assigns(:api_client_auth).expires_at
- end
-
- test "login creates token with configured lifetime" do
- token_lifetime = 1.hour
- Rails.configuration.Login.TokenLifetime = token_lifetime
- authorize_with :inactive
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- assert_response :redirect
- assert_not_nil assigns(:api_client)
- api_client_auth = assigns(:api_client_auth)
- assert_in_delta(api_client_auth.expires_at,
- api_client_auth.updated_at + token_lifetime,
- 1.second)
- end
-
- [[0, 1.hour, 1.hour],
- [1.hour, 2.hour, 1.hour],
- [2.hour, 1.hour, 1.hour],
- [2.hour, nil, 2.hour],
- ].each do |config_lifetime, request_lifetime, expect_lifetime|
- test "login with TokenLifetime=#{config_lifetime} and request has expires_at=#{ request_lifetime.nil? ? "nil" : request_lifetime }" do
- Rails.configuration.Login.TokenLifetime = config_lifetime
- expected_expiration_time = Time.now() + expect_lifetime
- authorize_with :inactive
- @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken
- if request_lifetime.nil?
- get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
- else
- get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com", expires_at: Time.now() + request_lifetime}, return_to: ',https://app.example'}
- end
- assert_response :redirect
- api_client_auth = assigns(:api_client_auth)
- assert_not_nil api_client_auth
- assert_not_nil assigns(:api_client)
- assert_in_delta(api_client_auth.expires_at,
- expected_expiration_time,
- 1.second)
- end
- end
-
- test "login with remote param returns a salted token" do
- authorize_with :inactive
- api_client_page = 'http://client.example.com/home'
- remote_prefix = 'zbbbb'
- get :login, params: {return_to: api_client_page, remote: remote_prefix}
- assert_response :redirect
- api_client_auth = assigns(:api_client_auth)
- assert_not_nil api_client_auth
- assert_includes(@response.redirect_url, 'api_token='+api_client_auth.salted_token(remote: remote_prefix))
+ setup do
+ @allowed_return_to = ",https://controller.api.client.invalid"
end
- test "login with malformed remote param returns an error" do
- authorize_with :inactive
- api_client_page = 'http://client.example.com/home'
- remote_prefix = 'invalid_cluster_id'
- get :login, params: {return_to: api_client_page, remote: remote_prefix}
- assert_response 400
- end
-
- test "login to LoginCluster" do
- Rails.configuration.Login.LoginCluster = 'zbbbb'
- Rails.configuration.RemoteClusters['zbbbb'] = ConfigLoader.to_OrderedOptions({'Host' => 'zbbbb.example.com'})
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- assert_response :redirect
- assert_equal("https://zbbbb.example.com/login?return_to=http%3A%2F%2Fclient.example.com%2Fhome", @response.redirect_url)
- assert_nil assigns(:api_client)
- end
-
- test "don't go into redirect loop if LoginCluster is self" do
- Rails.configuration.Login.LoginCluster = 'zzzzz'
- api_client_page = 'http://client.example.com/home'
- get :login, params: {return_to: api_client_page}
- # Doesn't redirect, just fail.
+ test "login route deleted" do
+ @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken
+ get :login, params: {provider: 'controller', return_to: @allowed_return_to}
assert_response 404
end
test "controller cannot create session without SystemRootToken" do
- get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: @allowed_return_to}
assert_response 401
end
test "controller cannot create session with wrong SystemRootToken" do
@request.headers['Authorization'] = 'Bearer blah'
- get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: @allowed_return_to}
assert_response 401
end
test "controller can create session using SystemRootToken" do
@request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken
- get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: ',https://app.example'}
+ get :create, params: {provider: 'controller', auth_info: {email: "foo@bar.com"}, return_to: @allowed_return_to}
assert_response :redirect
api_client_auth = assigns(:api_client_auth)
assert_not_nil api_client_auth
diff --git a/services/api/test/helpers/users_test_helper.rb b/services/api/test/helpers/users_test_helper.rb
index 6ca9977a5e..e106d994cd 100644
--- a/services/api/test/helpers/users_test_helper.rb
+++ b/services/api/test/helpers/users_test_helper.rb
@@ -3,6 +3,8 @@
# SPDX-License-Identifier: AGPL-3.0
module UsersTestHelper
+ include CurrentApiClient
+
def verify_link(response_items, link_object_name, expect_link, link_class,
link_name, head_uuid, tail_uuid, head_kind, fetch_object, class_name)
link = find_obj_in_resp response_items, 'arvados#link', link_object_name
@@ -75,17 +77,14 @@ module UsersTestHelper
assert !vm_login_perms.any?, "expected all vm_login_perms deleted"
end
- group = Group.where(name: 'All users').select do |g|
- g[:uuid].match(/-f+$/)
- end.first
- group_read_perms = Link.where(tail_uuid: uuid,
- head_uuid: group[:uuid],
+ group_write_perms = Link.where(tail_uuid: uuid,
+ head_uuid: all_users_group_uuid,
link_class: 'permission',
- name: 'can_read')
+ name: 'can_write')
if expect_group_perms
- assert group_read_perms.any?, "expected all users group read perms"
+ assert group_write_perms.any?, "expected all users group write perms"
else
- assert !group_read_perms.any?, "expected all users group perm deleted"
+ assert !group_write_perms.any?, "expected all users group write perms deleted"
end
signed_uuids = Link.where(link_class: 'signature',
diff --git a/services/api/test/integration/api_client_authorizations_api_test.rb b/services/api/test/integration/api_client_authorizations_api_test.rb
index 405e4bf687..1b5c563962 100644
--- a/services/api/test/integration/api_client_authorizations_api_test.rb
+++ b/services/api/test/integration/api_client_authorizations_api_test.rb
@@ -77,93 +77,49 @@ class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
end
[nil, db_current_time + 2.hours].each do |desired_expiration|
- test "expires_at gets clamped on non-admins when API.MaxTokenLifetime is set and desired expires_at #{desired_expiration.nil? ? 'is not set' : 'exceeds the limit'}" do
- Rails.configuration.API.MaxTokenLifetime = 1.hour
-
- # Test token creation
- start_t = db_current_time
- post "/arvados/v1/api_client_authorizations",
- params: {
- :format => :json,
- :api_client_authorization => {
- :owner_uuid => users(:active).uuid,
- :expires_at => desired_expiration,
- }
- },
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active_trustedclient).api_token}"}
- end_t = db_current_time
- assert_response 200
- expiration_t = json_response['expires_at'].to_time
- assert_operator expiration_t.to_f, :>, (start_t + Rails.configuration.API.MaxTokenLifetime).to_f
- if !desired_expiration.nil?
- assert_operator expiration_t.to_f, :<, desired_expiration.to_f
- else
- assert_operator expiration_t.to_f, :<, (end_t + Rails.configuration.API.MaxTokenLifetime).to_f
- end
-
- # Test token update
- previous_expiration = expiration_t
- token_uuid = json_response["uuid"]
- start_t = db_current_time
- put "/arvados/v1/api_client_authorizations/#{token_uuid}",
- params: {
- :api_client_authorization => {
- :expires_at => desired_expiration
- }
- },
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active_trustedclient).api_token}"}
- end_t = db_current_time
- assert_response 200
- expiration_t = json_response['expires_at'].to_time
- assert_operator previous_expiration.to_f, :<, expiration_t.to_f
- assert_operator expiration_t.to_f, :>, (start_t + Rails.configuration.API.MaxTokenLifetime).to_f
- if !desired_expiration.nil?
- assert_operator expiration_t.to_f, :<, desired_expiration.to_f
- else
- assert_operator expiration_t.to_f, :<, (end_t + Rails.configuration.API.MaxTokenLifetime).to_f
- end
- end
-
- test "behavior when expires_at is set to #{desired_expiration.nil? ? 'nil' : 'exceed the limit'} by admins when API.MaxTokenLifetime is set" do
- Rails.configuration.API.MaxTokenLifetime = 1.hour
-
- # Test token creation
- post "/arvados/v1/api_client_authorizations",
- params: {
- :format => :json,
- :api_client_authorization => {
- :owner_uuid => users(:admin).uuid,
- :expires_at => desired_expiration,
- }
- },
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
- assert_response 200
- if desired_expiration.nil?
- # When expires_at is nil, default to MaxTokenLifetime
- assert_operator (json_response['expires_at'].to_time.to_i - (db_current_time + Rails.configuration.API.MaxTokenLifetime).to_i).abs, :<, 2
- else
- assert_equal json_response['expires_at'].to_time.to_i, desired_expiration.to_i
- end
-
- # Test token update (reverse the above behavior)
- token_uuid = json_response['uuid']
- if desired_expiration.nil?
- submitted_updated_expiration = db_current_time + Rails.configuration.API.MaxTokenLifetime + 1.hour
- else
- submitted_updated_expiration = nil
- end
- put "/arvados/v1/api_client_authorizations/#{token_uuid}",
- params: {
- :api_client_authorization => {
- :expires_at => submitted_updated_expiration,
- }
- },
- headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
- assert_response 200
- if submitted_updated_expiration.nil?
- assert_operator (json_response['expires_at'].to_time.to_i - (db_current_time + Rails.configuration.API.MaxTokenLifetime).to_i).abs, :<, 2
- else
- assert_equal json_response['expires_at'].to_time.to_i, submitted_updated_expiration.to_i
+ [false, true].each do |admin|
+ test "expires_at gets clamped on #{admin ? 'admins' : 'non-admins'} when API.MaxTokenLifetime is set and desired expires_at #{desired_expiration.nil? ? 'is not set' : 'exceeds the limit'}" do
+ Rails.configuration.API.MaxTokenLifetime = 1.hour
+ token = api_client_authorizations(admin ? :admin_trustedclient : :active_trustedclient).api_token
+
+ # Test token creation
+ start_t = db_current_time
+ post "/arvados/v1/api_client_authorizations",
+ params: {
+ :format => :json,
+ :api_client_authorization => {
+ :owner_uuid => users(admin ? :admin : :active).uuid,
+ :expires_at => desired_expiration,
+ }
+ },
+ headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
+ assert_response 200
+ expiration_t = json_response['expires_at'].to_time
+ if admin && desired_expiration
+ assert_in_delta desired_expiration.to_f, expiration_t.to_f, 1
+ else
+ assert_in_delta (start_t + Rails.configuration.API.MaxTokenLifetime).to_f, expiration_t.to_f, 2
+ end
+
+ # Test token update
+ previous_expiration = expiration_t
+ token_uuid = json_response["uuid"]
+
+ start_t = db_current_time
+ patch "/arvados/v1/api_client_authorizations/#{token_uuid}",
+ params: {
+ :api_client_authorization => {
+ :expires_at => desired_expiration
+ }
+ },
+ headers: {'HTTP_AUTHORIZATION' => "OAuth2 #{token}"}
+ assert_response 200
+ expiration_t = json_response['expires_at'].to_time
+ if admin && desired_expiration
+ assert_in_delta desired_expiration.to_f, expiration_t.to_f, 1
+ else
+ assert_in_delta (start_t + Rails.configuration.API.MaxTokenLifetime).to_f, expiration_t.to_f, 2
+ end
end
end
end
diff --git a/services/api/test/integration/api_client_authorizations_scopes_test.rb b/services/api/test/integration/api_client_authorizations_scopes_test.rb
index d015e450a6..3b28a3163f 100644
--- a/services/api/test/integration/api_client_authorizations_scopes_test.rb
+++ b/services/api/test/integration/api_client_authorizations_scopes_test.rb
@@ -16,40 +16,43 @@ class ApiTokensScopeTest < ActionDispatch::IntegrationTest
end
test "user list token can only list users" do
- get_args = [params: {}, headers: auth(:active_userlist)]
- get(v1_url('users'), *get_args)
+ get_args = {params: {}, headers: auth(:active_userlist)}
+ get(v1_url('users'), **get_args)
assert_response :success
- get(v1_url('users', ''), *get_args) # Add trailing slash.
+ get(v1_url('users', ''), **get_args) # Add trailing slash.
assert_response :success
- get(v1_url('users', 'current'), *get_args)
+ get(v1_url('users', 'current'), **get_args)
assert_response 403
- get(v1_url('virtual_machines'), *get_args)
+ get(v1_url('virtual_machines'), **get_args)
assert_response 403
end
test "narrow + wide scoped tokens for different users" do
- get_args = [params: {
- reader_tokens: [api_client_authorizations(:anonymous).api_token]
- }, headers: auth(:active_userlist)]
- get(v1_url('users'), *get_args)
+ get_args = {
+ params: {
+ reader_tokens: [api_client_authorizations(:anonymous).api_token]
+ },
+ headers: auth(:active_userlist),
+ }
+ get(v1_url('users'), **get_args)
assert_response :success
- get(v1_url('users', ''), *get_args) # Add trailing slash.
+ get(v1_url('users', ''), **get_args) # Add trailing slash.
assert_response :success
- get(v1_url('users', 'current'), *get_args)
+ get(v1_url('users', 'current'), **get_args)
assert_response 403
- get(v1_url('virtual_machines'), *get_args)
+ get(v1_url('virtual_machines'), **get_args)
assert_response 403
end
test "specimens token can see exactly owned specimens" do
- get_args = [params: {}, headers: auth(:active_specimens)]
- get(v1_url('specimens'), *get_args)
+ get_args = {params: {}, headers: auth(:active_specimens)}
+ get(v1_url('specimens'), **get_args)
assert_response 403
- get(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+ get(v1_url('specimens', specimens(:owned_by_active_user).uuid), **get_args)
assert_response :success
- head(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+ head(v1_url('specimens', specimens(:owned_by_active_user).uuid), **get_args)
assert_response :success
- get(v1_url('specimens', specimens(:owned_by_spectator).uuid), *get_args)
+ get(v1_url('specimens', specimens(:owned_by_spectator).uuid), **get_args)
assert_includes(403..404, @response.status)
end
@@ -82,12 +85,12 @@ class ApiTokensScopeTest < ActionDispatch::IntegrationTest
test "token without scope has no access" do
# Logs are good for this test, because logs have relatively
# few access controls enforced at the model level.
- req_args = [params: {}, headers: auth(:admin_noscope)]
- get(v1_url('logs'), *req_args)
+ req_args = {params: {}, headers: auth(:admin_noscope)}
+ get(v1_url('logs'), **req_args)
assert_response 403
- get(v1_url('logs', logs(:noop).uuid), *req_args)
+ get(v1_url('logs', logs(:noop).uuid), **req_args)
assert_response 403
- post(v1_url('logs'), *req_args)
+ post(v1_url('logs'), **req_args)
assert_response 403
end
@@ -97,10 +100,10 @@ class ApiTokensScopeTest < ActionDispatch::IntegrationTest
def vm_logins_url(name)
v1_url('virtual_machines', virtual_machines(name).uuid, 'logins')
end
- get_args = [params: {}, headers: auth(:admin_vm)]
- get(vm_logins_url(:testvm), *get_args)
+ get_args = {params: {}, headers: auth(:admin_vm)}
+ get(vm_logins_url(:testvm), **get_args)
assert_response :success
- get(vm_logins_url(:testvm2), *get_args)
+ get(vm_logins_url(:testvm2), **get_args)
assert_includes(400..419, @response.status,
"getting testvm2 logins should have failed")
end
diff --git a/services/api/test/integration/bundler_version_test.rb b/services/api/test/integration/bundler_version_test.rb
new file mode 100644
index 0000000000..fb1634cf90
--- /dev/null
+++ b/services/api/test/integration/bundler_version_test.rb
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class BundlerVersionTest < ActionDispatch::IntegrationTest
+ test "Bundler version matches expectations" do
+ # The expected version range should be the latest that supports all the
+ # versions of Ruby we intend to support. This test checks that a developer
+ # doesn't accidentally update Bundler past that point.
+ expected = Gem::Dependency.new("", "~> 2.4.22")
+ actual = Bundler.gem_version
+ assert(
+ expected.match?("", actual),
+ "Bundler version #{actual} did not match #{expected}",
+ )
+ end
+end
diff --git a/services/api/test/integration/cross_origin_test.rb b/services/api/test/integration/cross_origin_test.rb
index e3099f1573..6a3db89fc4 100644
--- a/services/api/test/integration/cross_origin_test.rb
+++ b/services/api/test/integration/cross_origin_test.rb
@@ -5,10 +5,10 @@
require 'test_helper'
class CrossOriginTest < ActionDispatch::IntegrationTest
- def options *args
+ def options path, **kwargs
# Rails doesn't support OPTIONS the same way as GET, POST, etc.
reset! unless integration_session
- integration_session.__send__(:process, :options, *args).tap do
+ integration_session.__send__(:process, :options, path, **kwargs).tap do
copy_session_variables!
end
end
diff --git a/services/api/test/integration/discovery_document_test.rb b/services/api/test/integration/discovery_document_test.rb
new file mode 100644
index 0000000000..37e7750297
--- /dev/null
+++ b/services/api/test/integration/discovery_document_test.rb
@@ -0,0 +1,58 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class DiscoveryDocumentTest < ActionDispatch::IntegrationTest
+ CANONICAL_FIELDS = [
+ "auth",
+ "basePath",
+ "batchPath",
+ "description",
+ "discoveryVersion",
+ "documentationLink",
+ "id",
+ "kind",
+ "name",
+ "parameters",
+ "protocol",
+ "resources",
+ "revision",
+ "schemas",
+ "servicePath",
+ "title",
+ "version",
+ ]
+
+ test "canonical discovery document is saved to checkout" do
+ get "/discovery/v1/apis/arvados/v1/rest"
+ assert_response :success
+ canonical = Hash[CANONICAL_FIELDS.map { |key| [key, json_response[key]] }]
+ missing = canonical.select { |key| canonical[key].nil? }
+ assert(missing.empty?, "discovery document missing required fields")
+ actual_json = JSON.pretty_generate(canonical)
+
+ # Currently the Python SDK is the only component using this copy of the
+ # discovery document, and storing it with the source simplifies the build
+ # process, so it lives there. If another component wants to use it later,
+ # we might consider moving it to a more general subdirectory, but then the
+ # Python build process will need to be extended to accommodate that.
+ src_path = Rails.root.join("../../sdk/python/arvados-v1-discovery.json")
+ begin
+ expected_json = File.open(src_path) { |f| f.read }
+ rescue Errno::ENOENT
+ expected_json = "(#{src_path} not found)"
+ end
+
+ out_path = Rails.root.join("tmp", "test-arvados-v1-discovery.json")
+ if expected_json != actual_json
+ File.open(out_path, "w") { |f| f.write(actual_json) }
+ end
+ assert_equal(expected_json, actual_json, [
+ "#{src_path} did not match the live discovery document",
+ "Current live version saved to #{out_path}",
+ "Commit that to #{src_path} to regenerate documentation",
+ ].join(". "))
+ end
+end
diff --git a/services/api/test/integration/http_quirks_test.rb b/services/api/test/integration/http_quirks_test.rb
new file mode 100644
index 0000000000..107e6a6550
--- /dev/null
+++ b/services/api/test/integration/http_quirks_test.rb
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HttpQuirksTest < ActionDispatch::IntegrationTest
+ fixtures :all
+
+ test "GET request with empty Content-Type header" do
+ authorize_with :active
+ get "/arvados/v1/collections",
+ headers: auth(:active).merge("Content-Type" => "")
+ assert_response :success
+ end
+end
diff --git a/services/api/test/integration/permissions_test.rb b/services/api/test/integration/permissions_test.rb
index 65f5adc1d1..d2dce44f01 100644
--- a/services/api/test/integration/permissions_test.rb
+++ b/services/api/test/integration/permissions_test.rb
@@ -712,4 +712,87 @@ class PermissionsTest < ActionDispatch::IntegrationTest
assert_response :success
assert_empty json_response['manifest_text'], "empty collection manifest_text is not empty"
end
+
+ [['can_write', 'can_read', 'can_write'],
+ ['can_manage', 'can_write', 'can_manage'],
+ ['can_manage', 'can_read', 'can_manage'],
+ ['can_read', 'can_write', 'can_write'],
+ ['can_read', 'can_manage', 'can_manage'],
+ ['can_write', 'can_manage', 'can_manage'],
+ ].each do |perm1, perm2, expect|
+ test "creating #{perm2} permission returns existing #{perm1} link as #{expect}" do
+ link1 = act_as_system_user do
+ Link.create!({
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: collections(:baz_file).uuid,
+ name: perm1,
+ })
+ end
+ post "/arvados/v1/links",
+ params: {
+ link: {
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: collections(:baz_file).uuid,
+ name: perm2,
+ },
+ },
+ headers: auth(:admin)
+ assert_response :success
+ assert_equal link1.uuid, json_response["uuid"]
+ assert_equal expect, json_response["name"]
+ link1.reload
+ assert_equal expect, link1.name
+ end
+ end
+
+ test "creating duplicate login permission returns existing link" do
+ link1 = act_as_system_user do
+ Link.create!({
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: virtual_machines(:testvm2).uuid,
+ name: "can_login",
+ properties: {"username": "foo1"}
+ })
+ end
+ link2 = act_as_system_user do
+ Link.create!({
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: virtual_machines(:testvm2).uuid,
+ name: "can_login",
+ properties: {"username": "foo2"}
+ })
+ end
+ link3 = act_as_system_user do
+ Link.create!({
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: virtual_machines(:testvm2).uuid,
+ name: "can_read",
+ })
+ end
+ post "/arvados/v1/links",
+ params: {
+ link: {
+ link_class: "permission",
+ tail_uuid: users(:active).uuid,
+ head_uuid: virtual_machines(:testvm2).uuid,
+ name: "can_login",
+ properties: {"username": "foo2"},
+ },
+ },
+ headers: auth(:admin)
+ assert_response :success
+ assert_equal link2.uuid, json_response["uuid"]
+ assert_equal link2.created_at.to_date, json_response["created_at"].to_date
+ assert_equal "can_login", json_response["name"]
+ assert_equal "foo2", json_response["properties"]["username"]
+ link1.reload
+ assert_equal "foo1", link1.properties["username"]
+ link2.reload
+ assert_equal "foo2", link2.properties["username"]
+ end
end
diff --git a/services/api/test/integration/remote_user_test.rb b/services/api/test/integration/remote_user_test.rb
index 179d30f3cb..98250a6242 100644
--- a/services/api/test/integration/remote_user_test.rb
+++ b/services/api/test/integration/remote_user_test.rb
@@ -55,7 +55,6 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
SSLCertName: [["CN", WEBrick::Utils::getservername]],
StartCallback: lambda { ready.push(true) })
srv.mount_proc '/discovery/v1/apis/arvados/v1/rest' do |req, res|
- Rails.cache.delete 'arvados_v1_rest_discovery'
res.body = Arvados::V1::SchemaController.new.send(:discovery_doc).to_json
end
srv.mount_proc '/arvados/v1/users/current' do |req, res|
@@ -75,10 +74,15 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
end
res.status = @stub_token_status
if res.status == 200
- res.body = {
- uuid: api_client_authorizations(:active).uuid.sub('zzzzz', clusterid),
+ body = {
+ uuid: @stub_token_uuid || api_client_authorizations(:active).uuid.sub('zzzzz', clusterid),
+ owner_uuid: "#{clusterid}-tpzed-00000000000000z",
scopes: @stub_token_scopes,
- }.to_json
+ }
+ if @stub_content.is_a?(Hash) and owner_uuid = @stub_content[:uuid]
+ body[:owner_uuid] = owner_uuid
+ end
+ res.body = body.to_json
end
end
Thread.new do
@@ -96,12 +100,16 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
uuid: 'zbbbb-tpzed-000000000000001',
email: 'foo@example.com',
username: 'barney',
+ first_name: "Barney",
+ last_name: "Foo",
is_admin: true,
is_active: true,
is_invited: true,
}
@stub_token_status = 200
@stub_token_scopes = ["all"]
+ @stub_token_uuid = nil
+ ActionMailer::Base.deliveries = []
end
teardown do
@@ -110,6 +118,15 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
end
end
+ def uncache_token(src)
+ if match = src.match(/\b(?:[a-z0-9]{5}-){2}[a-z0-9]{15}\b/)
+ tokens = ApiClientAuthorization.where(uuid: match[0])
+ else
+ tokens = ApiClientAuthorization.where("uuid like ?", "#{src}-%")
+ end
+ tokens.update_all(expires_at: "1995-05-15T01:02:03Z")
+ end
+
test 'authenticate with remote token that has limited scope' do
get '/arvados/v1/collections',
params: {format: 'json'},
@@ -124,10 +141,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
headers: auth(remote: 'zbbbb')
assert_response :success
- # simulate cache expiry
- ApiClientAuthorization.where('uuid like ?', 'zbbbb-%').
- update_all(expires_at: db_current_time - 1.minute)
-
+ uncache_token('zbbbb')
# re-authorize after cache expires
get '/arvados/v1/collections',
params: {format: 'json'},
@@ -135,6 +149,14 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_response 403
end
+ test "authenticate with remote token with limited initial scope" do
+ @stub_token_scopes = ["GET /arvados/v1/users/"]
+ get "/arvados/v1/users/#{@stub_content[:uuid]}",
+ params: {format: "json"},
+ headers: auth(remote: "zbbbb")
+ assert_response :success
+ end
+
test 'authenticate with remote token' do
get '/arvados/v1/users/current',
params: {format: 'json'},
@@ -147,7 +169,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal 'barney', json_response['username']
# revoke original token
- @stub_status = 401
+ @stub_token_status = 401
# re-authorize before cache expires
get '/arvados/v1/users/current',
@@ -155,10 +177,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
headers: auth(remote: 'zbbbb')
assert_response :success
- # simulate cache expiry
- ApiClientAuthorization.where('uuid like ?', 'zbbbb-%').
- update_all(expires_at: db_current_time - 1.minute)
-
+ uncache_token('zbbbb')
# re-authorize after cache expires
get '/arvados/v1/users/current',
params: {format: 'json'},
@@ -173,7 +192,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
update_all(user_id: users(:active).id)
# revive original token and re-authorize
- @stub_status = 200
+ @stub_token_status = 200
@stub_content[:username] = 'blarney'
@stub_content[:email] = 'blarney@example.com'
get '/arvados/v1/users/current',
@@ -196,11 +215,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
@stub_content[:is_active] = false
@stub_content[:is_invited] = false
- # simulate cache expiry
- ApiClientAuthorization.where(
- uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).
- update_all(expires_at: db_current_time - 1.minute)
-
+ uncache_token('zbbbb')
# re-authorize after cache expires
get '/arvados/v1/users/current',
params: {format: 'json'},
@@ -227,6 +242,40 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal 'foo', json_response['username']
end
+ test 'authenticate with remote token with secret part identical to previously cached token' do
+ get '/arvados/v1/users/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+ get '/arvados/v1/api_client_authorizations/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+
+ # Expire the cached token.
+ @cached_token_uuid = json_response['uuid']
+ act_as_system_user do
+ ApiClientAuthorization.where(uuid: @cached_token_uuid).update_all(expires_at: db_current_time() - 1.day)
+ end
+
+ # Now use the same bare token, but set up the remote cluster to
+ # return a different UUID this time.
+ @stub_token_uuid = 'zbbbb-gj3su-123451234512345'
+ get '/arvados/v1/users/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+
+ # Confirm that we actually retrieved the new UUID from the stub
+ # cluster -- otherwise we didn't really test the conflicting-UUID
+ # case.
+ get '/arvados/v1/api_client_authorizations/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+ assert_equal @stub_token_uuid, json_response['uuid']
+ end
+
test 'authenticate with remote token from misbehaving remote cluster' do
get '/arvados/v1/users/current',
params: {format: 'json'},
@@ -355,6 +404,12 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
test 'get user from Login cluster' do
Rails.configuration.Login.LoginCluster = 'zbbbb'
+ email_dest = ActiveSupport::OrderedOptions.new
+ email_dest[:'arvados-admin@example.com'] = ActiveSupport::OrderedOptions.new
+ Rails.configuration.Users.UserNotifierEmailBcc = email_dest
+ Rails.configuration.Users.NewUserNotificationRecipients = email_dest
+ Rails.configuration.Users.NewInactiveUserNotificationRecipients = email_dest
+
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
@@ -364,14 +419,18 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal true, json_response['is_active']
assert_equal 'foo@example.com', json_response['email']
assert_equal 'barney', json_response['username']
+
+ assert_equal 2, ActionMailer::Base.deliveries.length
+ assert_equal "Welcome to Arvados - account enabled", ActionMailer::Base.deliveries[0].subject
+ assert_equal "[ARVADOS] New user created notification", ActionMailer::Base.deliveries[1].subject
end
[true, false].each do |trusted|
[true, false].each do |logincluster|
- [true, false].each do |admin|
- [true, false].each do |active|
+ [true, false, nil].each do |admin|
+ [true, false, nil].each do |active|
[true, false].each do |autosetup|
- [true, false].each do |invited|
+ [true, false, nil].each do |invited|
test "get invited=#{invited}, active=#{active}, admin=#{admin} user from #{if logincluster then "Login" else "peer" end} cluster when AutoSetupNewUsers=#{autosetup} ActivateUsers=#{trusted}" do
Rails.configuration.Login.LoginCluster = 'zbbbb' if logincluster
Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = trusted
@@ -389,9 +448,9 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
headers: auth(remote: 'zbbbb')
assert_response :success
assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
- assert_equal (logincluster && admin && invited && active), json_response['is_admin']
- assert_equal (invited and (logincluster || trusted || autosetup)), json_response['is_invited']
- assert_equal (invited and (logincluster || trusted) and active), json_response['is_active']
+ assert_equal (logincluster && !!admin && (invited != false) && !!active), json_response['is_admin']
+ assert_equal ((invited == true || (invited == nil && !!active)) && (logincluster || trusted || autosetup)), json_response['is_invited']
+ assert_equal ((invited != false) && (logincluster || trusted) && !!active), json_response['is_active']
assert_equal 'foo@example.com', json_response['email']
assert_equal 'barney', json_response['username']
end
@@ -446,11 +505,8 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal 'foo@example.com', json_response['email']
assert_equal 'barney', json_response['username']
- # Delete cached value. User should be inactive now.
- act_as_system_user do
- ApiClientAuthorization.delete_all
- end
-
+ uncache_token('zbbbb')
+ # User should be inactive now.
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
@@ -572,5 +628,68 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
assert_equal 'zzzzz-tpzed-anonymouspublic', json_response['uuid']
end
+ [400, 401, 403, 422, 500, 502, 503].each do |status|
+ test "handle #{status} response when checking remote-provided v2 token" do
+ @stub_token_status = status
+ get "/arvados/v1/users/#{@stub_content[:uuid]}",
+ params: {format: "json"},
+ headers: auth(remote: "zbbbb")
+ assert_response(status < 500 ? 401 : status)
+ end
+
+ test "handle #{status} response when checking remote-provided v2 token at anonymously accessible endpoint" do
+ @stub_token_status = status
+ get "/arvados/v1/keep_services/accessible",
+ params: {format: "json"},
+ headers: auth(remote: "zbbbb")
+ assert_response(status < 500 ? :success : status)
+ end
+ test "handle #{status} response when checking token issued by login cluster" do
+ @stub_token_status = status
+ Rails.configuration.Login.LoginCluster = "zbbbb"
+ get "/arvados/v1/users/current",
+ params: {format: "json"},
+ headers: {'HTTP_AUTHORIZATION' => "Bearer badtoken"}
+ assert_response(status < 500 ? 401 : status)
+ end
+
+ test "handle #{status} response when checking token issued by login cluster at anonymously accessible endpoint" do
+ @stub_token_status = status
+ Rails.configuration.Login.LoginCluster = "zbbbb"
+ get "/arvados/v1/keep_services/accessible",
+ params: {format: "json"},
+ headers: {'HTTP_AUTHORIZATION' => "Bearer badtoken"}
+ assert_response(status < 500 ? :success : status)
+ end
+ end
+
+ [401, 403, 422, 500, 502, 503].each do |status|
+ test "propagate #{status} response from getting uncached user" do
+ @stub_status = status
+ get "/arvados/v1/users/#{@stub_content[:uuid]}",
+ params: {format: "json"},
+ headers: auth(remote: "zbbbb")
+ assert_response status
+ end
+
+ test "use cached user after getting #{status} response" do
+ url_path = "/arvados/v1/users/#{@stub_content[:uuid]}"
+ params = {format: "json"}
+ headers = auth(remote: "zbbbb")
+
+ get url_path, params: params, headers: headers
+ assert_response :success
+
+ uncache_token(headers["HTTP_AUTHORIZATION"])
+ expect_email = @stub_content[:email]
+ @stub_content[:email] = "new#{expect_email}"
+ @stub_status = status
+ get url_path, params: params, headers: headers
+ assert_response :success
+ user = User.find_by_uuid(@stub_content[:uuid])
+ assert_not_nil user
+ assert_equal expect_email, user.email
+ end
+ end
end
diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb
index 76659f3207..eb49cf832e 100644
--- a/services/api/test/integration/user_sessions_test.rb
+++ b/services/api/test/integration/user_sessions_test.rb
@@ -8,7 +8,7 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
# remote prefix & return url packed into the return_to param passed around
# between API and SSO provider.
def client_url(remote: nil)
- url = ',https://wb.example.com'
+ url = ',https://controller.api.client.invalid'
url = "#{remote}#{url}" unless remote.nil?
url
end
diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
index 430f0d385d..f8956b21e2 100644
--- a/services/api/test/integration/users_test.rb
+++ b/services/api/test/integration/users_test.rb
@@ -40,7 +40,7 @@ class UsersTest < ActionDispatch::IntegrationTest
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -85,7 +85,7 @@ class UsersTest < ActionDispatch::IntegrationTest
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -113,7 +113,7 @@ class UsersTest < ActionDispatch::IntegrationTest
# two new links: system_group, and 'All users' group.
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -135,7 +135,7 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_equal 'foo@example.com', created['email'], 'expected input email'
# verify links
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
@@ -163,7 +163,7 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_equal created['email'], 'foo@example.com', 'expected original email'
# verify links
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -187,7 +187,7 @@ class UsersTest < ActionDispatch::IntegrationTest
# four extra links: system_group, login, group, repo and vm
- verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+ verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
'All users', created['uuid'], 'arvados#group', true, 'Group'
verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
@@ -203,6 +203,22 @@ class UsersTest < ActionDispatch::IntegrationTest
ApiClientAuthorization.create!(user: User.find_by_uuid(created['uuid']), api_client: ApiClient.all.first).api_token
end
+ # share project and collections with the new user
+ act_as_system_user do
+ Link.create!(tail_uuid: created['uuid'],
+ head_uuid: groups(:aproject).uuid,
+ link_class: 'permission',
+ name: 'can_manage')
+ Link.create!(tail_uuid: created['uuid'],
+ head_uuid: collections(:collection_owned_by_active).uuid,
+ link_class: 'permission',
+ name: 'can_read')
+ Link.create!(tail_uuid: created['uuid'],
+ head_uuid: collections(:collection_owned_by_active_with_file_stats).uuid,
+ link_class: 'permission',
+ name: 'can_write')
+ end
+
assert_equal 1, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'expected token not found'
post "/arvados/v1/users/#{created['uuid']}/unsetup", params: {}, headers: auth(:admin)
@@ -213,6 +229,8 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_not_nil created2['uuid'], 'expected uuid for the newly created user'
assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'
assert_equal 0, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'token should have been deleted by user unsetup'
+ # check permissions are deleted
+ assert_empty Link.where(tail_uuid: created['uuid'])
verify_link_existence created['uuid'], created['email'], false, false, false, false, false
end
@@ -285,15 +303,15 @@ class UsersTest < ActionDispatch::IntegrationTest
assert_response :success
rp = json_response
assert_not_nil rp["uuid"]
- assert_not_nil rp["is_active"]
- assert_nil rp["is_admin"]
+ assert_equal true, rp["is_active"]
+ assert_equal false, rp["is_admin"]
get "/arvados/v1/users/#{rp['uuid']}",
params: {format: 'json'},
headers: auth(:admin)
assert_response :success
assert_equal rp["uuid"], json_response['uuid']
- assert_nil json_response['is_admin']
+ assert_equal false, json_response['is_admin']
assert_equal true, json_response['is_active']
assert_equal 'foo@example.com', json_response['email']
assert_equal 'barney', json_response['username']
diff --git a/services/api/test/tasks/delete_old_container_logs_test.rb b/services/api/test/tasks/delete_old_container_logs_test.rb
deleted file mode 100644
index c81b331f24..0000000000
--- a/services/api/test/tasks/delete_old_container_logs_test.rb
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'rake'
-
-Rake.application.rake_require "tasks/delete_old_container_logs"
-Rake::Task.define_task(:environment)
-
-class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase
- TASK_NAME = "db:delete_old_container_logs"
-
- def log_uuids(*fixture_names)
- fixture_names.map { |name| logs(name).uuid }
- end
-
- def run_with_expiry(clean_after)
- Rails.configuration.Containers.Logging.MaxAge = clean_after
- Rake::Task[TASK_NAME].reenable
- Rake.application.invoke_task TASK_NAME
- end
-
- def check_log_existence(test_method, fixture_uuids)
- uuids_now = Log.where("object_uuid LIKE :pattern AND event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat')", pattern: "%-dz642-%").map(&:uuid)
- fixture_uuids.each do |expect_uuid|
- send(test_method, uuids_now, expect_uuid)
- end
- end
-
- test "delete all finished logs" do
- uuids_to_keep = log_uuids(:stderr_for_running_container,
- :crunchstat_for_running_container)
- uuids_to_clean = log_uuids(:stderr_for_previous_container,
- :crunchstat_for_previous_container,
- :stderr_for_ancient_container,
- :crunchstat_for_ancient_container)
- run_with_expiry(1)
- check_log_existence(:assert_includes, uuids_to_keep)
- check_log_existence(:refute_includes, uuids_to_clean)
- end
-
- test "delete old finished logs" do
- uuids_to_keep = log_uuids(:stderr_for_running_container,
- :crunchstat_for_running_container,
- :stderr_for_previous_container,
- :crunchstat_for_previous_container)
- uuids_to_clean = log_uuids(:stderr_for_ancient_container,
- :crunchstat_for_ancient_container)
- run_with_expiry(360.days)
- check_log_existence(:assert_includes, uuids_to_keep)
- check_log_existence(:refute_includes, uuids_to_clean)
- end
-end
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
index 843d4f1b23..0255d8907d 100644
--- a/services/api/test/test_helper.rb
+++ b/services/api/test/test_helper.rb
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: AGPL-3.0
-require 'update_permissions'
+require_relative '../lib/update_permissions'
ENV["RAILS_ENV"] = "test"
unless ENV["NO_COVERAGE_TEST"]
@@ -179,21 +179,21 @@ class ActionController::TestCase
end
[:get, :post, :put, :patch, :delete].each do |method|
- define_method method do |action, *args|
+ define_method method do |action, **kwargs|
check_counter action
# After Rails 5.0 upgrade, some params don't get properly serialized.
# One case are filters: [['attr', 'op', 'val']] become [['attr'], ['op'], ['val']]
# if not passed upstream as a JSON string.
- if args[0].is_a?(Hash) && args[0][:params].is_a?(Hash)
- args[0][:params].each do |key, _|
+ if kwargs[:params].is_a?(Hash)
+ kwargs[:params].each do |key, _|
next if key == :exclude_script_versions # Job Reuse tests
# Keys could be: :filters, :where, etc
- if [Array, Hash].include?(args[0][:params][key].class)
- args[0][:params][key] = SafeJSON.dump(args[0][:params][key])
+ if [Array, Hash].include?(kwargs[:params][key].class)
+ kwargs[:params][key] = SafeJSON.dump(kwargs[:params][key])
end
end
end
- super action, *args
+ super action, **kwargs
end
end
diff --git a/services/api/test/unit/api_client_test.rb b/services/api/test/unit/api_client_test.rb
index a0eacfd13b..dbe9c86367 100644
--- a/services/api/test/unit/api_client_test.rb
+++ b/services/api/test/unit/api_client_test.rb
@@ -40,4 +40,31 @@ class ApiClientTest < ActiveSupport::TestCase
end
end
end
+
+ [
+ [true, "https://ok.example", "https://ok.example"],
+ [true, "https://ok.example:443/", "https://ok.example"],
+ [true, "https://ok.example", "https://ok.example:443/"],
+ [true, "https://ok.example", "https://ok.example/foo/bar"],
+ [true, "https://ok.example", "https://ok.example?foo/bar"],
+ [true, "https://ok.example/waz?quux", "https://ok.example/foo?bar#baz"],
+ [false, "https://ok.example", "http://ok.example"],
+ [false, "https://ok.example", "http://ok.example:443"],
+
+ [true, "https://*.wildcard.example", "https://ok.wildcard.example"],
+ [true, "https://*.wildcard.example", "https://ok.ok.ok.wildcard.example"],
+ [false, "https://*.wildcard.example", "http://wrongscheme.wildcard.example"],
+ [false, "https://*.wildcard.example", "https://wrongport.wildcard.example:80"],
+ [false, "https://*.wildcard.example", "https://ok.wildcard.example.attacker.example/"],
+ [false, "https://*.wildcard.example", "https://attacker.example/https://ok.wildcard.example/"],
+ [false, "https://*.wildcard.example", "https://attacker.example/?https://ok.wildcard.example/"],
+ [false, "https://*.wildcard.example", "https://attacker.example/#https://ok.wildcard.example/"],
+ [false, "https://*-wildcard.example", "https://notsupported-wildcard.example"],
+ ].each do |pass, trusted, current|
+ test "is_trusted(#{current}) returns #{pass} based on #{trusted} in TrustedClients" do
+ Rails.configuration.Login.TrustedClients = ActiveSupport::OrderedOptions.new
+ Rails.configuration.Login.TrustedClients[trusted.to_sym] = ActiveSupport::OrderedOptions.new
+ assert_equal pass, ApiClient.new(url_prefix: current).is_trusted
+ end
+ end
end
diff --git a/services/api/test/unit/arvados_model_test.rb b/services/api/test/unit/arvados_model_test.rb
index 1e2e08059e..69a2710bb9 100644
--- a/services/api/test/unit/arvados_model_test.rb
+++ b/services/api/test/unit/arvados_model_test.rb
@@ -217,13 +217,13 @@ class ArvadosModelTest < ActiveSupport::TestCase
assert group.valid?, "group is not valid"
# update 1
- group.update_attributes!(name: "test create and update name 1")
+ group.update!(name: "test create and update name 1")
results = Group.where(uuid: group.uuid)
assert_equal "test create and update name 1", results.first.name, "Expected name to be updated to 1"
updated_at_1 = results.first.updated_at.to_f
# update 2
- group.update_attributes!(name: "test create and update name 2")
+ group.update!(name: "test create and update name 2")
results = Group.where(uuid: group.uuid)
assert_equal "test create and update name 2", results.first.name, "Expected name to be updated to 2"
updated_at_2 = results.first.updated_at.to_f
@@ -237,15 +237,15 @@ class ArvadosModelTest < ActiveSupport::TestCase
c = Collection.create!(properties: {})
assert_equal({}, c.properties)
- c.update_attributes(properties: {'foo' => 'foo'})
+ c.update(properties: {'foo' => 'foo'})
c.reload
assert_equal({'foo' => 'foo'}, c.properties)
- c.update_attributes(properties: nil)
+ c.update(properties: nil)
c.reload
assert_equal({}, c.properties)
- c.update_attributes(properties: {foo: 'bar'})
+ c.update(properties: {foo: 'bar'})
assert_equal({'foo' => 'bar'}, c.properties)
c.reload
assert_equal({'foo' => 'bar'}, c.properties)
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
index e7134a5be5..f3b48dbf70 100644
--- a/services/api/test/unit/collection_test.rb
+++ b/services/api/test/unit/collection_test.rb
@@ -91,19 +91,19 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal 34, c.file_size_total
# Updating the manifest should change file stats
- c.update_attributes(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:34:foo2.txt\n")
+ c.update(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:34:foo2.txt\n")
assert c.valid?
assert_equal 2, c.file_count
assert_equal 68, c.file_size_total
# Updating file stats and the manifest should use manifest values
- c.update_attributes(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", file_count:10, file_size_total: 10)
+ c.update(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", file_count:10, file_size_total: 10)
assert c.valid?
assert_equal 1, c.file_count
assert_equal 34, c.file_size_total
# Updating just the file stats should be ignored
- c.update_attributes(file_count: 10, file_size_total: 10)
+ c.update(file_count: 10, file_size_total: 10)
assert c.valid?
assert_equal 1, c.file_count
assert_equal 34, c.file_size_total
@@ -166,7 +166,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal 1, c.version
assert_equal false, c.preserve_version
# Make a versionable update, it shouldn't create a new version yet
- c.update_attributes!({'name' => 'bar'})
+ c.update!({'name' => 'bar'})
c.reload
assert_equal 'bar', c.name
assert_equal 1, c.version
@@ -175,12 +175,12 @@ class CollectionTest < ActiveSupport::TestCase
c.update_column('modified_at', fifteen_min_ago) # Update without validations/callbacks
c.reload
assert_equal fifteen_min_ago.to_i, c.modified_at.to_i
- c.update_attributes!({'name' => 'baz'})
+ c.update!({'name' => 'baz'})
c.reload
assert_equal 'baz', c.name
assert_equal 2, c.version
# Make another update, no new version should be created
- c.update_attributes!({'name' => 'foobar'})
+ c.update!({'name' => 'foobar'})
c.reload
assert_equal 'foobar', c.name
assert_equal 2, c.version
@@ -197,7 +197,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_not_nil c.replication_confirmed_at
assert_not_nil c.replication_confirmed
# Make the versionable update
- c.update_attributes!({'name' => 'foobarbaz'})
+ c.update!({'name' => 'foobarbaz'})
c.reload
assert_equal 'foobarbaz', c.name
assert_equal 3, c.version
@@ -214,7 +214,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal 1, c.version
assert_equal false, c.preserve_version
# This update shouldn't produce a new version, as the idle time is not up
- c.update_attributes!({
+ c.update!({
'name' => 'bar'
})
c.reload
@@ -223,7 +223,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal false, c.preserve_version
# This update should produce a new version, even if the idle time is not up
# and also keep the preserve_version=true flag to persist it.
- c.update_attributes!({
+ c.update!({
'name' => 'baz',
'preserve_version' => true
})
@@ -234,7 +234,7 @@ class CollectionTest < ActiveSupport::TestCase
# Make sure preserve_version is not disabled after being enabled, unless
# a new version is created.
# This is a non-versionable update
- c.update_attributes!({
+ c.update!({
'preserve_version' => false,
'replication_desired' => 2
})
@@ -243,7 +243,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal 2, c.replication_desired
assert_equal true, c.preserve_version
# This is a versionable update
- c.update_attributes!({
+ c.update!({
'preserve_version' => false,
'name' => 'foobar'
})
@@ -252,7 +252,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal false, c.preserve_version
assert_equal 'foobar', c.name
# Flipping only 'preserve_version' to true doesn't create a new version
- c.update_attributes!({'preserve_version' => true})
+ c.update!({'preserve_version' => true})
c.reload
assert_equal 3, c.version
assert_equal true, c.preserve_version
@@ -265,7 +265,7 @@ class CollectionTest < ActiveSupport::TestCase
assert c.valid?
assert_equal false, c.preserve_version
modified_at = c.modified_at.to_f
- c.update_attributes!({'preserve_version' => true})
+ c.update!({'preserve_version' => true})
c.reload
assert_equal true, c.preserve_version
assert_equal modified_at, c.modified_at.to_f,
@@ -285,7 +285,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal 1, c.version
assert_raises(ActiveRecord::RecordInvalid) do
- c.update_attributes!({
+ c.update!({
name => new_value
})
end
@@ -302,14 +302,14 @@ class CollectionTest < ActiveSupport::TestCase
assert c.valid?
assert_equal 1, c.version
# Make changes so that a new version is created
- c.update_attributes!({'name' => 'bar'})
+ c.update!({'name' => 'bar'})
c.reload
assert_equal 2, c.version
assert_equal 2, Collection.where(current_version_uuid: c.uuid).count
new_uuid = 'zzzzz-4zz18-somefakeuuidnow'
assert_empty Collection.where(uuid: new_uuid)
# Update UUID on current version, check that both collections point to it
- c.update_attributes!({'uuid' => new_uuid})
+ c.update!({'uuid' => new_uuid})
c.reload
assert_equal new_uuid, c.uuid
assert_equal 2, Collection.where(current_version_uuid: new_uuid).count
@@ -364,7 +364,7 @@ class CollectionTest < ActiveSupport::TestCase
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
assert c.valid?
- c.update_attributes!({'properties' => value_1})
+ c.update!({'properties' => value_1})
c.reload
assert c.changes.keys.empty?
c.properties = value_2
@@ -386,7 +386,7 @@ class CollectionTest < ActiveSupport::TestCase
assert c.valid?
original_version_modified_at = c.modified_at.to_f
# Make changes so that a new version is created
- c.update_attributes!({'name' => 'bar'})
+ c.update!({'name' => 'bar'})
c.reload
assert_equal 2, c.version
# Get the old version
@@ -400,7 +400,7 @@ class CollectionTest < ActiveSupport::TestCase
# Make update on current version so old version get the attribute synced;
# its modified_at should not change.
new_replication = 3
- c.update_attributes!({'replication_desired' => new_replication})
+ c.update!({'replication_desired' => new_replication})
c.reload
assert_equal new_replication, c.replication_desired
c_old.reload
@@ -441,7 +441,7 @@ class CollectionTest < ActiveSupport::TestCase
c = create_collection 'foo', Encoding::US_ASCII
assert c.valid?
# Make changes so that a new version is created
- c.update_attributes!({'name' => 'bar'})
+ c.update!({'name' => 'bar'})
c.reload
assert_equal 2, c.version
# Get the old version
@@ -479,7 +479,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_not_equal first_val, c.attributes[attr]
# Make changes so that a new version is created and a synced field is
# updated on both
- c.update_attributes!({'name' => 'bar', attr => first_val})
+ c.update!({'name' => 'bar', attr => first_val})
c.reload
assert_equal 2, c.version
assert_equal first_val, c.attributes[attr]
@@ -487,7 +487,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_equal first_val, Collection.where(current_version_uuid: c.uuid, version: 1).first.attributes[attr]
# Only make an update on the same synced field & check that the previously
# created version also gets it.
- c.update_attributes!({attr => second_val})
+ c.update!({attr => second_val})
c.reload
assert_equal 2, c.version
assert_equal second_val, c.attributes[attr]
@@ -525,7 +525,7 @@ class CollectionTest < ActiveSupport::TestCase
# Update attribute and check if version number should be incremented
old_value = c.attributes[attr]
- c.update_attributes!({attr => val})
+ c.update!({attr => val})
assert_equal new_version_expected, c.version == 2
assert_equal val, c.attributes[attr]
@@ -559,11 +559,11 @@ class CollectionTest < ActiveSupport::TestCase
col2 = create_collection 'bar', Encoding::US_ASCII
assert col2.valid?
assert_equal 1, col2.version
- col2.update_attributes({name: 'baz'})
+ col2.update({name: 'baz'})
assert_equal 2, col2.version
# Try to make col2 a past version of col1. It shouldn't be possible
- col2.update_attributes({current_version_uuid: col1.uuid})
+ col2.update({current_version_uuid: col1.uuid})
assert col2.invalid?
col2.reload
assert_not_equal col1.uuid, col2.current_version_uuid
@@ -725,10 +725,10 @@ class CollectionTest < ActiveSupport::TestCase
test "storage_classes_desired cannot be empty" do
act_as_user users(:active) do
c = collections(:collection_owned_by_active)
- c.update_attributes storage_classes_desired: ["hot"]
+ c.update storage_classes_desired: ["hot"]
assert_equal ["hot"], c.storage_classes_desired
assert_raise ArvadosModel::InvalidStateTransitionError do
- c.update_attributes storage_classes_desired: []
+ c.update storage_classes_desired: []
end
end
end
@@ -736,7 +736,7 @@ class CollectionTest < ActiveSupport::TestCase
test "storage classes lists should only contain non-empty strings" do
c = collections(:storage_classes_desired_default_unconfirmed)
act_as_user users(:admin) do
- assert c.update_attributes(storage_classes_desired: ["default", "a_string"],
+ assert c.update(storage_classes_desired: ["default", "a_string"],
storage_classes_confirmed: ["another_string"])
[
["storage_classes_desired", ["default", 42]],
@@ -745,7 +745,7 @@ class CollectionTest < ActiveSupport::TestCase
["storage_classes_confirmed", [""]],
].each do |attr, val|
assert_raise ArvadosModel::InvalidStateTransitionError do
- assert c.update_attributes({attr => val})
+ assert c.update({attr => val})
end
end
end
@@ -754,7 +754,7 @@ class CollectionTest < ActiveSupport::TestCase
test "storage_classes_confirmed* can be set by admin user" do
c = collections(:storage_classes_desired_default_unconfirmed)
act_as_user users(:admin) do
- assert c.update_attributes(storage_classes_confirmed: ["default"],
+ assert c.update(storage_classes_confirmed: ["default"],
storage_classes_confirmed_at: Time.now)
end
end
@@ -764,16 +764,16 @@ class CollectionTest < ActiveSupport::TestCase
c = collections(:storage_classes_desired_default_unconfirmed)
# Cannot set just one at a time.
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes storage_classes_confirmed: ["default"]
+ c.update storage_classes_confirmed: ["default"]
end
c.reload
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes storage_classes_confirmed_at: Time.now
+ c.update storage_classes_confirmed_at: Time.now
end
      # Cannot set both at once, either.
c.reload
assert_raise ArvadosModel::PermissionDeniedError do
- assert c.update_attributes(storage_classes_confirmed: ["default"],
+ assert c.update(storage_classes_confirmed: ["default"],
storage_classes_confirmed_at: Time.now)
end
end
@@ -784,15 +784,15 @@ class CollectionTest < ActiveSupport::TestCase
c = collections(:storage_classes_desired_default_confirmed_default)
# Cannot clear just one at a time.
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes storage_classes_confirmed: []
+ c.update storage_classes_confirmed: []
end
c.reload
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes storage_classes_confirmed_at: nil
+ c.update storage_classes_confirmed_at: nil
end
# Can clear both at once.
c.reload
- assert c.update_attributes(storage_classes_confirmed: [],
+ assert c.update(storage_classes_confirmed: [],
storage_classes_confirmed_at: nil)
end
end
@@ -802,7 +802,7 @@ class CollectionTest < ActiveSupport::TestCase
Rails.configuration.Collections.DefaultReplication = 2
act_as_user users(:active) do
c = collections(:replication_undesired_unconfirmed)
- c.update_attributes replication_desired: ask
+ c.update replication_desired: ask
assert_equal ask, c.replication_desired
end
end
@@ -811,7 +811,7 @@ class CollectionTest < ActiveSupport::TestCase
test "replication_confirmed* can be set by admin user" do
c = collections(:replication_desired_2_unconfirmed)
act_as_user users(:admin) do
- assert c.update_attributes(replication_confirmed: 2,
+ assert c.update(replication_confirmed: 2,
replication_confirmed_at: Time.now)
end
end
@@ -821,14 +821,14 @@ class CollectionTest < ActiveSupport::TestCase
c = collections(:replication_desired_2_unconfirmed)
# Cannot set just one at a time.
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes replication_confirmed: 1
+ c.update replication_confirmed: 1
end
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes replication_confirmed_at: Time.now
+ c.update replication_confirmed_at: Time.now
end
# Cannot set both at once, either.
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes(replication_confirmed: 1,
+ c.update(replication_confirmed: 1,
replication_confirmed_at: Time.now)
end
end
@@ -839,15 +839,15 @@ class CollectionTest < ActiveSupport::TestCase
c = collections(:replication_desired_2_confirmed_2)
# Cannot clear just one at a time.
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes replication_confirmed: nil
+ c.update replication_confirmed: nil
end
c.reload
assert_raise ArvadosModel::PermissionDeniedError do
- c.update_attributes replication_confirmed_at: nil
+ c.update replication_confirmed_at: nil
end
# Can clear both at once.
c.reload
- assert c.update_attributes(replication_confirmed: nil,
+ assert c.update(replication_confirmed: nil,
replication_confirmed_at: nil)
end
end
@@ -855,7 +855,7 @@ class CollectionTest < ActiveSupport::TestCase
test "clear replication_confirmed* when introducing a new block in manifest" do
c = collections(:replication_desired_2_confirmed_2)
act_as_user users(:active) do
- assert c.update_attributes(manifest_text: collections(:user_agreement).signed_manifest_text_only_for_tests)
+ assert c.update(manifest_text: collections(:user_agreement).signed_manifest_text_only_for_tests)
assert_nil c.replication_confirmed
assert_nil c.replication_confirmed_at
end
@@ -865,7 +865,7 @@ class CollectionTest < ActiveSupport::TestCase
c = collections(:replication_desired_2_confirmed_2)
act_as_user users(:active) do
new_manifest = c.signed_manifest_text_only_for_tests.sub(':bar', ':foo')
- assert c.update_attributes(manifest_text: new_manifest)
+ assert c.update(manifest_text: new_manifest)
assert_equal 2, c.replication_confirmed
assert_not_nil c.replication_confirmed_at
end
@@ -882,7 +882,7 @@ class CollectionTest < ActiveSupport::TestCase
# not, this test would pass without testing the relevant case):
assert_operator new_manifest.length+40, :<, c.signed_manifest_text_only_for_tests.length
- assert c.update_attributes(manifest_text: new_manifest)
+ assert c.update(manifest_text: new_manifest)
assert_equal 2, c.replication_confirmed
assert_not_nil c.replication_confirmed_at
end
@@ -892,7 +892,7 @@ class CollectionTest < ActiveSupport::TestCase
act_as_user users(:active) do
t0 = db_current_time
c = Collection.create!(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\n", name: 'foo')
- c.update_attributes! trash_at: (t0 + 1.hours)
+ c.update! trash_at: (t0 + 1.hours)
c.reload
sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text_only_for_tests)[1].to_i
assert_operator sig_exp.to_i, :<=, (t0 + 1.hours).to_i
@@ -932,7 +932,7 @@ class CollectionTest < ActiveSupport::TestCase
assert_not_empty c, 'Should be able to find live collection'
# mark collection as expired
- c.first.update_attributes!(trash_at: Time.new.strftime("%Y-%m-%d"))
+ c.first.update!(trash_at: Time.new.strftime("%Y-%m-%d"))
c = Collection.readable_by(current_user).where(uuid: uuid)
assert_empty c, 'Should not be able to find expired collection'
@@ -947,7 +947,7 @@ class CollectionTest < ActiveSupport::TestCase
act_as_user users(:active) do
t0 = db_current_time
c = Collection.create!(manifest_text: '', name: 'foo')
- c.update_attributes! trash_at: (t0 - 2.weeks)
+ c.update! trash_at: (t0 - 2.weeks)
c.reload
assert_operator c.trash_at, :>, t0
end
@@ -1002,7 +1002,7 @@ class CollectionTest < ActiveSupport::TestCase
else
c = collections(fixture_name)
end
- updates_ok = c.update_attributes(updates)
+ updates_ok = c.update(updates)
expect_valid = expect[:state] != :invalid
assert_equal expect_valid, updates_ok, c.errors.full_messages.to_s
case expect[:state]
@@ -1039,13 +1039,13 @@ class CollectionTest < ActiveSupport::TestCase
start = db_current_time
act_as_user users(:active) do
c = Collection.create!(manifest_text: '', name: 'foo')
- c.update_attributes!(trash_at: start + 86400.seconds)
+ c.update!(trash_at: start + 86400.seconds)
assert_operator c.delete_at, :>=, start + (86400*22).seconds
assert_operator c.delete_at, :<, start + (86400*22 + 30).seconds
c.destroy
c = Collection.create!(manifest_text: '', name: 'foo')
- c.update_attributes!(is_trashed: true)
+ c.update!(is_trashed: true)
assert_operator c.delete_at, :>=, start + (86400*21).seconds
end
end
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index e5c0085184..d25c08a579 100644
--- a/services/api/test/unit/container_request_test.rb
+++ b/services/api/test/unit/container_request_test.rb
@@ -34,8 +34,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
def lock_and_run(ctr)
act_as_system_user do
- ctr.update_attributes!(state: Container::Locked)
- ctr.update_attributes!(state: Container::Running)
+ ctr.update!(state: Container::Locked)
+ ctr.update!(state: Container::Running)
end
end
@@ -107,7 +107,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
{"runtime_constraints" => {"vcpus" => 1}},
{"runtime_constraints" => {"vcpus" => 1, "ram" => nil}},
{"runtime_constraints" => {"vcpus" => 0, "ram" => 123}},
- {"runtime_constraints" => {"vcpus" => "1", "ram" => "123"}},
+ {"runtime_constraints" => {"vcpus" => "1", "ram" => -1}},
{"mounts" => {"FOO" => "BAR"}},
{"mounts" => {"FOO" => {}}},
{"mounts" => {"FOO" => {"kind" => "tmp", "capacity" => 42.222}}},
@@ -129,7 +129,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
cr.save!
assert_raises(ActiveRecord::RecordInvalid) do
cr = ContainerRequest.find_by_uuid cr.uuid
- cr.update_attributes!({state: "Committed",
+ cr.update!({state: "Committed",
priority: 1}.merge(value))
end
end
@@ -138,7 +138,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
test "Update from fixture" do
set_user_from_auth :active
cr = ContainerRequest.find_by_uuid(container_requests(:running).uuid)
- cr.update_attributes!(description: "New description")
+ cr.update!(description: "New description")
assert_equal "New description", cr.description
end
@@ -147,7 +147,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
cr = create_minimal_req!(state: "Uncommitted", priority: 1)
cr.save!
cr = ContainerRequest.find_by_uuid cr.uuid
- cr.update_attributes!(state: "Committed",
+ cr.update!(state: "Committed",
runtime_constraints: {"vcpus" => 1, "ram" => 23})
assert_not_nil cr.container_uuid
end
@@ -164,7 +164,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
test "Container request commit" do
set_user_from_auth :active
- cr = create_minimal_req!(runtime_constraints: {"vcpus" => 2, "ram" => 30})
+ cr = create_minimal_req!(runtime_constraints: {"vcpus" => 2, "ram" => 300000000})
assert_nil cr.container_uuid
@@ -175,7 +175,9 @@ class ContainerRequestTest < ActiveSupport::TestCase
cr.reload
- assert ({"vcpus" => 2, "ram" => 30}.to_a - cr.runtime_constraints.to_a).empty?
+ assert_empty({"vcpus" => 2, "ram" => 300000000}.to_a - cr.runtime_constraints.to_a)
+
+ assert_equal 0, Rails.configuration.Containers.DefaultKeepCacheRAM
assert_not_nil cr.container_uuid
c = Container.find_by_uuid cr.container_uuid
@@ -186,7 +188,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_equal({}, c.environment)
assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
assert_equal "/out", c.output_path
- assert ({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}.to_a - c.runtime_constraints.to_a).empty?
+ assert ({"keep_cache_disk" => 2<<30, "keep_cache_ram" => 0, "vcpus" => 2, "ram" => 300000000}.to_a - c.runtime_constraints.to_a).empty?
assert_operator 0, :<, c.priority
assert_raises(ActiveRecord::RecordInvalid) do
@@ -215,7 +217,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_operator c1.priority, :<, c2.priority
c2priority_was = c2.priority
- cr1.update_attributes!(priority: 0)
+ cr1.update!(priority: 0)
c1.reload
assert_equal 0, c1.priority
@@ -231,11 +233,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
act_as_system_user do
Container.find_by_uuid(cr.container_uuid).
- update_attributes!(state: Container::Cancelled)
+ update!(state: Container::Cancelled, cost: 1.25)
end
cr.reload
assert_equal "Final", cr.state
+ assert_equal 1.25, cr.cumulative_cost
assert_equal users(:active).uuid, cr.modified_by_user_uuid
end
@@ -249,8 +252,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
c
end
@@ -260,13 +263,15 @@ class ContainerRequestTest < ActiveSupport::TestCase
output_pdh = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
log_pdh = 'fa7aeb5140e2848d39b416daeef4ffc5+45'
act_as_system_user do
- c.update_attributes!(state: Container::Complete,
+ c.update!(state: Container::Complete,
+ cost: 1.25,
output: output_pdh,
log: log_pdh)
end
cr.reload
assert_equal "Final", cr.state
+ assert_equal 1.25, cr.cumulative_cost
assert_equal users(:active).uuid, cr.modified_by_user_uuid
assert_not_nil cr.output_uuid
@@ -297,8 +302,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running,
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running,
output: output_pdh,
log: log_pdh)
c
@@ -310,7 +315,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
act_as_system_user do
Collection.where(portable_data_hash: output_pdh).delete_all
Collection.where(portable_data_hash: log_pdh).delete_all
- c.update_attributes!(state: Container::Complete)
+ c.update!(state: Container::Complete)
end
cr.reload
@@ -328,8 +333,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
c
end
@@ -389,14 +394,15 @@ class ContainerRequestTest < ActiveSupport::TestCase
]
parents = toplevel_crs.map(&findctr)
- children = parents.map do |parent|
+ children_crs = parents.map do |parent|
lock_and_run(parent)
with_container_auth(parent) do
create_minimal_req!(state: "Committed",
priority: 1,
environment: {"child" => parent.environment["workflow"]})
end
- end.map(&findctr)
+ end
+ children = children_crs.map(&findctr)
grandchildren = children.reverse.map do |child|
lock_and_run(child)
@@ -448,7 +454,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
# increasing priority of the most recent toplevel container should
# reprioritize all of its descendants (including the shared
# grandchild) above everything else.
- toplevel_crs[2].update_attributes!(priority: 72)
+ toplevel_crs[2].update!(priority: 72)
(parents + children + grandchildren + [shared_grandchild]).map(&:reload)
assert_operator shared_grandchild.priority, :>, grandchildren[0].priority
assert_operator shared_grandchild.priority, :>, children[0].priority
@@ -461,6 +467,36 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_operator shared_grandchild.priority, :<=, grandchildren[2].priority
assert_operator shared_grandchild.priority, :<=, children[2].priority
assert_operator shared_grandchild.priority, :<=, parents[2].priority
+
+ # cancelling the most recent toplevel container should
+ # reprioritize all of its descendants (except the shared
+ # grandchild) to zero
+ toplevel_crs[2].update!(priority: 0)
+ (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+ assert_operator 0, :==, parents[2].priority
+ assert_operator 0, :==, children[2].priority
+ assert_operator 0, :==, grandchildren[2].priority
+ assert_operator shared_grandchild.priority, :==, grandchildren[0].priority
+
+ # cancel a child request, the parent should be > 0 but
+ # the child and grandchild go to 0.
+ children_crs[1].update!(priority: 0)
+ (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+ assert_operator 0, :<, parents[1].priority
+ assert_operator parents[0].priority, :>, parents[1].priority
+ assert_operator 0, :==, children[1].priority
+ assert_operator 0, :==, grandchildren[1].priority
+ assert_operator shared_grandchild.priority, :==, grandchildren[0].priority
+
+ # update the parent, it should get a higher priority but the children and
+ # grandchildren should remain at 0
+ toplevel_crs[1].update!(priority: 6)
+ (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+ assert_operator 0, :<, parents[1].priority
+ assert_operator parents[0].priority, :<, parents[1].priority
+ assert_operator 0, :==, children[1].priority
+ assert_operator 0, :==, grandchildren[1].priority
+ assert_operator shared_grandchild.priority, :==, grandchildren[0].priority
end
[
@@ -477,23 +513,38 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
[
- ['running_container_auth', 'zzzzz-dz642-runningcontainr', 501],
- ].each do |token, expected, expected_priority|
- test "create as #{token} with requesting_container_uuid set and expect output to be intermediate" do
+ [:admin, 0, "output"],
+ [:admin, 19, "output"],
+ [:admin, nil, "output"],
+ [:running_container_auth, 0, "intermediate"],
+ [:running_container_auth, 29, "intermediate"],
+ [:running_container_auth, nil, "intermediate"],
+ ].each do |token, exit_code, expect_output_type|
+ test "container with exit_code #{exit_code} has collection types set with output type #{expect_output_type}" do
+ final_state = if exit_code.nil?
+ Container::Cancelled
+ else
+ Container::Complete
+ end
set_user_from_auth token
- cr = create_minimal_req!
- assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'
- assert_equal expected, cr.requesting_container_uuid
- assert_equal expected_priority, cr.priority
-
- cr.state = ContainerRequest::Committed
- cr.save!
-
- run_container(cr)
- cr.reload
- output = Collection.find_by_uuid(cr.output_uuid)
- props = {"type": "intermediate", "container_request": cr.uuid}
- assert_equal props.symbolize_keys, output.properties.symbolize_keys
+ request = create_minimal_req!(
+ container_count_max: 1,
+ priority: 500,
+ state: ContainerRequest::Committed,
+ )
+ run_container(request, final_state: final_state, exit_code: exit_code)
+ request.reload
+ assert_equal(ContainerRequest::Final, request.state)
+
+ output = Collection.find_by_uuid(request.output_uuid)
+ assert_not_nil(output)
+ assert_equal(request.uuid, output.properties["container_request"])
+ assert_equal(expect_output_type, output.properties["type"])
+
+ log = Collection.find_by_uuid(request.log_uuid)
+ assert_not_nil(log)
+ assert_equal(request.uuid, log.properties["container_request"])
+ assert_equal("log", log.properties["type"])
end
end
@@ -754,7 +805,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
# should be assigned.
# * When use_existing is false, a different container should be assigned.
# * When env1 and env2 are different, a different container should be assigned.
- cr2.update_attributes!({state: ContainerRequest::Committed})
+ cr2.update!({state: ContainerRequest::Committed})
assert_equal (cr2.use_existing == true and (env1 == env2)),
(cr1.container_uuid == cr2.container_uuid)
end
@@ -775,8 +826,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
c
end
@@ -788,7 +839,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
prev_container_uuid = cr.container_uuid
act_as_system_user do
- c.update_attributes!(state: Container::Cancelled)
+ c.update!(cost: 0.5, subrequests_cost: 1.25)
+ c.update!(state: Container::Cancelled)
end
cr.reload
@@ -800,7 +852,10 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Cancelled)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
+ c.update!(cost: 0.125)
+ c.update!(state: Container::Cancelled)
c
end
@@ -809,6 +864,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_equal "Final", cr.state
assert_equal prev_container_uuid, cr.container_uuid
assert_not_equal cr2.container_uuid, cr.container_uuid
+ assert_equal 1.875, cr.cumulative_cost
end
test "Retry on container cancelled with runtime_token" do
@@ -822,8 +878,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
assert_equal spec.token, c.runtime_token
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
c
end
@@ -833,7 +889,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
prev_container_uuid = cr.container_uuid
act_as_system_user do
- c.update_attributes!(state: Container::Cancelled)
+ c.update!(state: Container::Cancelled)
end
cr.reload
@@ -844,7 +900,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
assert_equal spec.token, c.runtime_token
- c.update_attributes!(state: Container::Cancelled)
+ c.update!(state: Container::Cancelled)
c
end
@@ -860,8 +916,8 @@ class ContainerRequestTest < ActiveSupport::TestCase
c = act_as_system_user do
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
c
end
@@ -876,7 +932,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
logc = Collection.new(manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n")
logc.save!
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Cancelled, log: logc.portable_data_hash)
+ c.update!(state: Container::Cancelled, log: logc.portable_data_hash)
c
end
end
@@ -894,6 +950,174 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
+ test "Retry sub-request on error" do
+ set_user_from_auth :active
+ cr1 = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 2, command: ["echo", "foo1"])
+ c1 = Container.find_by_uuid(cr1.container_uuid)
+ act_as_system_user do
+ c1.update!(state: Container::Locked)
+ c1.update!(state: Container::Running)
+ end
+
+ cr2 = with_container_auth(c1) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo2"])
+ end
+ c2 = Container.find_by_uuid(cr2.container_uuid)
+ act_as_system_user do
+ c2.update!(state: Container::Locked)
+ c2.update!(state: Container::Running)
+ end
+
+ cr3 = with_container_auth(c2) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo3"])
+ end
+ c3 = Container.find_by_uuid(cr3.container_uuid)
+
+ act_as_system_user do
+ c3.update!(state: Container::Locked)
+ c3.update!(state: Container::Running)
+ end
+
+ # All the containers are in running state
+
+ c3.reload
+ cr3.reload
+
+ # c3 still running
+ assert_equal 'Running', c3.state
+ assert_equal 1, cr3.container_count
+ assert_equal 'Committed', cr3.state
+
+ # c3 goes to cancelled state
+ act_as_system_user do
+ c3.state = "Cancelled"
+ c3.save!
+ end
+
+ cr3.reload
+
+ # Because the parent request is still live, it should
+ # be retried.
+ assert_equal 2, cr3.container_count
+ assert_equal 'Committed', cr3.state
+ end
+
+ test "Do not retry sub-request when process tree is cancelled" do
+ set_user_from_auth :active
+ cr1 = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 2, command: ["echo", "foo1"])
+ c1 = Container.find_by_uuid(cr1.container_uuid)
+ act_as_system_user do
+ c1.update!(state: Container::Locked)
+ c1.update!(state: Container::Running)
+ end
+
+ cr2 = with_container_auth(c1) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo2"])
+ end
+ c2 = Container.find_by_uuid(cr2.container_uuid)
+ act_as_system_user do
+ c2.update!(state: Container::Locked)
+ c2.update!(state: Container::Running)
+ end
+
+ cr3 = with_container_auth(c2) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo3"])
+ end
+ c3 = Container.find_by_uuid(cr3.container_uuid)
+
+ act_as_system_user do
+ c3.update!(state: Container::Locked)
+ c3.update!(state: Container::Running)
+ end
+
+ # All the containers are in running state
+
+ # Now cancel the toplevel container request
+ act_as_system_user do
+ cr1.priority = 0
+ cr1.save!
+ end
+
+ c3.reload
+ cr3.reload
+
+ # c3 still running
+ assert_equal 'Running', c3.state
+ assert_equal 1, cr3.container_count
+ assert_equal 'Committed', cr3.state
+
+ # c3 goes to cancelled state
+ act_as_system_user do
+ assert_equal 0, c3.priority
+ c3.state = "Cancelled"
+ c3.save!
+ end
+
+ cr3.reload
+
+ # Because the parent process was cancelled, it _should not_ be
+ # retried.
+ assert_equal 1, cr3.container_count
+ assert_equal 'Final', cr3.state
+ end
+
+ test "Retry process tree on error" do
+ set_user_from_auth :active
+ cr1 = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 2, command: ["echo", "foo1"])
+ c1 = Container.find_by_uuid(cr1.container_uuid)
+ act_as_system_user do
+ c1.update!(state: Container::Locked)
+ c1.update!(state: Container::Running)
+ end
+
+ cr2 = with_container_auth(c1) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo2"])
+ end
+ c2 = Container.find_by_uuid(cr2.container_uuid)
+ act_as_system_user do
+ c2.update!(state: Container::Locked)
+ c2.update!(state: Container::Running)
+ end
+
+ cr3 = with_container_auth(c2) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 2, command: ["echo", "foo3"])
+ end
+ c3 = Container.find_by_uuid(cr3.container_uuid)
+
+ act_as_system_user do
+ c3.update!(state: Container::Locked)
+ c3.update!(state: Container::Running)
+ end
+
+ # All the containers are in running state
+
+ c1.reload
+
+ # c1 goes to cancelled state
+ act_as_system_user do
+ c1.state = "Cancelled"
+ c1.save!
+ end
+
+ cr1.reload
+ cr2.reload
+ cr3.reload
+
+ # Because the root request is still live, it should be retried.
+ # Assumes the root is something like arvados-cwl-runner where
+ # container reuse enables it to more or less pick up where it left
+ # off.
+ assert_equal 2, cr1.container_count
+ assert_equal 'Committed', cr1.state
+
+ # These keep running.
+ assert_equal 1, cr2.container_count
+ assert_equal 'Committed', cr2.state
+
+ assert_equal 1, cr3.container_count
+ assert_equal 'Committed', cr3.state
+ end
+
test "Output collection name setting using output_name with name collision resolution" do
set_user_from_auth :active
output_name = 'unimaginative name'
@@ -907,13 +1131,13 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_equal ContainerRequest::Final, cr.state
output_coll = Collection.find_by_uuid(cr.output_uuid)
# Make sure the resulting output collection name include the original name
- # plus the date
+ # plus the last 15 characters of uuid
assert_not_equal output_name, output_coll.name,
"more than one collection with the same owner and name"
assert output_coll.name.include?(output_name),
"New name should include original name"
- assert_match /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z/, output_coll.name,
- "New name should include ISO8601 date"
+ assert_match(/#{Regexp.escape(output_coll.uuid[-15..-1])}/, output_coll.name,
+ "New name should include last 15 characters of uuid")
end
[[0, :check_output_ttl_0],
@@ -954,17 +1178,17 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_in_delta(delete, now + year, 10)
end
- def run_container(cr)
+ def run_container(cr, final_state: Container::Complete, exit_code: 0)
act_as_system_user do
logc = Collection.new(owner_uuid: system_user_uuid,
manifest_text: ". ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\n")
logc.save!
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
- c.update_attributes!(state: Container::Complete,
- exit_code: 0,
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
+ c.update!(state: final_state,
+ exit_code: exit_code,
output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
log: logc.portable_data_hash)
logc.destroy
@@ -987,7 +1211,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
cr3 = create_minimal_req!(priority: 1, state: ContainerRequest::Uncommitted)
assert_equal ContainerRequest::Uncommitted, cr3.state
- cr3.update_attributes!(state: ContainerRequest::Committed)
+ cr3.update!(state: ContainerRequest::Committed)
assert_equal cr.container_uuid, cr3.container_uuid
assert_equal ContainerRequest::Final, cr3.state
end
@@ -1083,7 +1307,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
# Even though preemptible is not allowed, we should be able to
# commit a CR that was created earlier when preemptible was the
# default.
- commit_later.update_attributes!(priority: 1, state: "Committed")
+ commit_later.update!(priority: 1, state: "Committed")
expect[false].push commit_later
end
@@ -1099,7 +1323,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
# Cancelling the parent used to fail while updating the child
# containers' priority, because the child containers' unchanged
# preemptible fields caused validation to fail.
- parent.update_attributes!(state: 'Cancelled')
+ parent.update!(state: 'Cancelled')
[false, true].each do |pflag|
expect[pflag].each do |cr|
@@ -1226,7 +1450,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
when 'Final'
act_as_system_user do
Container.find_by_uuid(cr.container_uuid).
- update_attributes!(state: Container::Cancelled)
+ update!(state: Container::Cancelled)
end
cr.reload
else
@@ -1234,10 +1458,10 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
assert_equal state, cr.state
if permitted
- assert cr.update_attributes!(updates)
+ assert cr.update!(updates)
else
assert_raises(ActiveRecord::RecordInvalid) do
- cr.update_attributes!(updates)
+ cr.update!(updates)
end
end
end
@@ -1255,8 +1479,41 @@ class ContainerRequestTest < ActiveSupport::TestCase
cr.destroy
# the cr's container now has priority of 0
+ c.reload
+ assert_equal 0, c.priority
+ end
+ end
+
+ test "trash the project containing a container_request and check its container's priority" do
+ act_as_user users(:active) do
+ cr = ContainerRequest.find_by_uuid container_requests(:running_to_be_deleted).uuid
+
+ # initially the cr's container has priority > 0
c = Container.find_by_uuid(cr.container_uuid)
+ assert_equal 1, c.priority
+
+ prj = Group.find_by_uuid cr.owner_uuid
+ prj.update!(trash_at: db_current_time)
+
+ # the cr's container now has priority of 0
+ c.reload
assert_equal 0, c.priority
+
+ assert_equal 'Running', c.state
+ assert_equal 'Committed', cr.state
+
+ # mark the container as cancelled, this should cause the
+ # container request to go to final state and run the finalize
+ # function
+ act_as_system_user do
+ c.update!(state: 'Cancelled', log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')
+ end
+ c.reload
+ cr.reload
+
+ assert_equal 'Cancelled', c.state
+ assert_equal 'Final', cr.state
+ assert_nil cr.log_uuid
end
end
@@ -1358,7 +1615,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
set_user_from_auth :active
cr = create_minimal_req!
- assert_equal false, cr.update_attributes(state: "Committed",
+ assert_equal false, cr.update(state: "Committed",
priority: 1,
mounts: cr.mounts.merge(sm),
secret_mounts: sm)
@@ -1378,7 +1635,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_not_nil ApiClientAuthorization.find_by_uuid(spec.uuid)
act_as_system_user do
- c.update_attributes!(state: Container::Complete,
+ c.update!(state: Container::Complete,
exit_code: 0,
output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')
@@ -1457,7 +1714,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_nil cr2.container_uuid
# Update cr2 to commited state, check for reuse, then run it
- cr2.update_attributes!({state: ContainerRequest::Committed})
+ cr2.update!({state: ContainerRequest::Committed})
assert_equal cr1.container_uuid, cr2.container_uuid
cr2.reload
@@ -1491,12 +1748,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
logc.save!
c = Container.find_by_uuid(cr.container_uuid)
- c.update_attributes!(state: Container::Locked)
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
- c.update_attributes!(output_properties: container_prop)
+ c.update!(output_properties: container_prop)
- c.update_attributes!(state: Container::Complete,
+ c.update!(state: Container::Complete,
exit_code: 0,
output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
log: logc.portable_data_hash)
@@ -1511,4 +1768,63 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
end
+ test "Cumulative cost includes retried attempts but not reused containers" do
+ set_user_from_auth :active
+ cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 3)
+ c = Container.find_by_uuid cr.container_uuid
+ act_as_system_user do
+ c.update!(state: Container::Locked)
+ c.update!(state: Container::Running)
+ c.update!(state: Container::Cancelled, cost: 3)
+ end
+ cr.reload
+ assert_equal 3, cr.cumulative_cost
+
+ c = Container.find_by_uuid cr.container_uuid
+ lock_and_run c
+ c.reload
+ assert_equal 0, c.subrequests_cost
+
+ # cr2 is a child/subrequest
+ cr2 = with_container_auth(c) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+ end
+ assert_equal c.uuid, cr2.requesting_container_uuid
+ c2 = Container.find_by_uuid cr2.container_uuid
+ act_as_system_user do
+ c2.update!(state: Container::Locked)
+ c2.update!(state: Container::Running)
+ logc = Collection.new(owner_uuid: system_user_uuid,
+ manifest_text: ". ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\n")
+ logc.save!
+ c2.update!(state: Container::Complete,
+ exit_code: 0,
+ output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
+ log: logc.portable_data_hash,
+ cost: 7)
+ end
+ c.reload
+ assert_equal 7, c.subrequests_cost
+
+ # cr3 is an identical child/subrequest, will reuse c2
+ cr3 = with_container_auth(c) do
+ create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+ end
+ assert_equal c.uuid, cr3.requesting_container_uuid
+ c3 = Container.find_by_uuid cr3.container_uuid
+ assert_equal c2.uuid, c3.uuid
+ assert_equal Container::Complete, c3.state
+ c.reload
+ assert_equal 7, c.subrequests_cost
+
+ act_as_system_user do
+ c.update!(state: Container::Complete, exit_code: 0, cost: 9)
+ end
+
+ c.reload
+ assert_equal 7, c.subrequests_cost
+ cr.reload
+ assert_equal 3+7+9, cr.cumulative_cost
+ end
+
end
diff --git a/services/api/test/unit/container_test.rb b/services/api/test/unit/container_test.rb
index a4c0ce1792..09b885b391 100644
--- a/services/api/test/unit/container_test.rb
+++ b/services/api/test/unit/container_test.rb
@@ -24,6 +24,7 @@ class ContainerTest < ActiveSupport::TestCase
output_path: "test",
runtime_constraints: {
"API" => false,
+ "keep_cache_disk" => 0,
"keep_cache_ram" => 0,
"ram" => 12000000000,
"vcpus" => 4
@@ -36,7 +37,28 @@ class ContainerTest < ActiveSupport::TestCase
},
secret_mounts: {},
runtime_user_uuid: "zzzzz-tpzed-xurymjxw79nv3jz",
- runtime_auth_scopes: ["all"]
+ runtime_auth_scopes: ["all"],
+ scheduling_parameters: {},
+ }
+
+ REUSABLE_ATTRS_SLIM = {
+ command: ["echo", "slim"],
+ container_image: "9ae44d5792468c58bcf85ce7353c7027+124",
+ cwd: "test",
+ environment: {},
+ mounts: {},
+ output_path: "test",
+ runtime_auth_scopes: ["all"],
+ runtime_constraints: {
+ "API" => false,
+ "keep_cache_disk" => 0,
+ "keep_cache_ram" => 0,
+ "ram" => 8 << 30,
+ "vcpus" => 4
+ },
+ runtime_user_uuid: "zzzzz-tpzed-xurymjxw79nv3jz",
+ secret_mounts: {},
+ scheduling_parameters: {},
}
def request_only attrs
@@ -54,7 +76,7 @@ class ContainerTest < ActiveSupport::TestCase
def check_illegal_updates c, bad_updates
bad_updates.each do |u|
- refute c.update_attributes(u), u.inspect
+ refute c.update(u), u.inspect
refute c.valid?, u.inspect
c.reload
end
@@ -151,15 +173,15 @@ class ContainerTest < ActiveSupport::TestCase
assert_equal Container::Queued, c.state
set_user_from_auth :dispatch1
- c.update_attributes! state: Container::Locked
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Locked
+ c.update! state: Container::Running
[
'error', 'errorDetail', 'warning', 'warningDetail', 'activity'
].each do |k|
# String type is allowed
string_val = 'A string is accepted'
- c.update_attributes! runtime_status: {k => string_val}
+ c.update! runtime_status: {k => string_val}
assert_equal string_val, c.runtime_status[k]
# Other types aren't allowed
@@ -167,7 +189,7 @@ class ContainerTest < ActiveSupport::TestCase
42, false, [], {}, nil
].each do |unallowed_val|
assert_raises ActiveRecord::RecordInvalid do
- c.update_attributes! runtime_status: {k => unallowed_val}
+ c.update! runtime_status: {k => unallowed_val}
end
end
end
@@ -187,41 +209,41 @@ class ContainerTest < ActiveSupport::TestCase
assert_equal Container::Queued, c1.state
assert_raises ArvadosModel::PermissionDeniedError do
- c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+ c1.update! runtime_status: {'error' => 'Oops!'}
end
set_user_from_auth :dispatch1
# Allow updates when state = Locked
- c1.update_attributes! state: Container::Locked
- c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+ c1.update! state: Container::Locked
+ c1.update! runtime_status: {'error' => 'Oops!'}
assert c1.runtime_status.key? 'error'
# Reset when transitioning from Locked to Queued
- c1.update_attributes! state: Container::Queued
+ c1.update! state: Container::Queued
assert_equal c1.runtime_status, {}
# Allow updates when state = Running
- c1.update_attributes! state: Container::Locked
- c1.update_attributes! state: Container::Running
- c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+ c1.update! state: Container::Locked
+ c1.update! state: Container::Running
+ c1.update! runtime_status: {'error' => 'Oops!'}
assert c1.runtime_status.key? 'error'
# Don't allow updates on other states
- c1.update_attributes! state: Container::Complete
+ c1.update! state: Container::Complete
assert_raises ActiveRecord::RecordInvalid do
- c1.update_attributes! runtime_status: {'error' => 'Some other error'}
+ c1.update! runtime_status: {'error' => 'Some other error'}
end
set_user_from_auth :active
c2, _ = minimal_new(attrs)
assert_equal c2.runtime_status, {}
set_user_from_auth :dispatch1
- c2.update_attributes! state: Container::Locked
- c2.update_attributes! state: Container::Running
- c2.update_attributes! state: Container::Cancelled
+ c2.update! state: Container::Locked
+ c2.update! state: Container::Running
+ c2.update! state: Container::Cancelled
assert_raises ActiveRecord::RecordInvalid do
- c2.update_attributes! runtime_status: {'error' => 'Oops!'}
+ c2.update! runtime_status: {'error' => 'Oops!'}
end
end
@@ -229,7 +251,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
env = {"C" => "3", "B" => "2", "A" => "1"}
m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
- rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1, "API" => true, "cuda" => {"device_count":0, "driver_version": "", "hardware_capability": ""}}
+ rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1, "keep_cache_disk" => 0, "API" => true, "cuda" => {"device_count":0, "driver_version": "", "hardware_capability": ""}}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
c.reload
assert_equal Container.deep_sort_hash(env).to_json, c.environment.to_json
@@ -272,13 +294,13 @@ class ContainerTest < ActiveSupport::TestCase
assert_not_equal c_older.uuid, c_recent.uuid
set_user_from_auth :dispatch1
- c_older.update_attributes!({state: Container::Locked})
- c_older.update_attributes!({state: Container::Running})
- c_older.update_attributes!(completed_attrs)
+ c_older.update!({state: Container::Locked})
+ c_older.update!({state: Container::Running})
+ c_older.update!(completed_attrs)
- c_recent.update_attributes!({state: Container::Locked})
- c_recent.update_attributes!({state: Container::Running})
- c_recent.update_attributes!(completed_attrs)
+ c_recent.update!({state: Container::Locked})
+ c_recent.update!({state: Container::Running})
+ c_recent.update!(completed_attrs)
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -312,14 +334,14 @@ class ContainerTest < ActiveSupport::TestCase
out1 = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
log1 = collections(:real_log_collection).portable_data_hash
- c_output1.update_attributes!({state: Container::Locked})
- c_output1.update_attributes!({state: Container::Running})
- c_output1.update_attributes!(completed_attrs.merge({log: log1, output: out1}))
+ c_output1.update!({state: Container::Locked})
+ c_output1.update!({state: Container::Running})
+ c_output1.update!(completed_attrs.merge({log: log1, output: out1}))
out2 = 'fa7aeb5140e2848d39b416daeef4ffc5+45'
- c_output2.update_attributes!({state: Container::Locked})
- c_output2.update_attributes!({state: Container::Running})
- c_output2.update_attributes!(completed_attrs.merge({log: log1, output: out2}))
+ c_output2.update!({state: Container::Locked})
+ c_output2.update!({state: Container::Running})
+ c_output2.update!(completed_attrs.merge({log: log1, output: out2}))
set_user_from_auth :active
reused = Container.resolve(ContainerRequest.new(request_only(common_attrs)))
@@ -335,14 +357,14 @@ class ContainerTest < ActiveSupport::TestCase
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
set_user_from_auth :dispatch1
- c_slower.update_attributes!({state: Container::Locked})
- c_slower.update_attributes!({state: Container::Running,
+ c_slower.update!({state: Container::Locked})
+ c_slower.update!({state: Container::Running,
progress: 0.1})
- c_faster_started_first.update_attributes!({state: Container::Locked})
- c_faster_started_first.update_attributes!({state: Container::Running,
+ c_faster_started_first.update!({state: Container::Locked})
+ c_faster_started_first.update!({state: Container::Running,
progress: 0.15})
- c_faster_started_second.update_attributes!({state: Container::Locked})
- c_faster_started_second.update_attributes!({state: Container::Running,
+ c_faster_started_second.update!({state: Container::Locked})
+ c_faster_started_second.update!({state: Container::Running,
progress: 0.15})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -359,14 +381,14 @@ class ContainerTest < ActiveSupport::TestCase
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
set_user_from_auth :dispatch1
- c_slower.update_attributes!({state: Container::Locked})
- c_slower.update_attributes!({state: Container::Running,
+ c_slower.update!({state: Container::Locked})
+ c_slower.update!({state: Container::Running,
progress: 0.1})
- c_faster_started_first.update_attributes!({state: Container::Locked})
- c_faster_started_first.update_attributes!({state: Container::Running,
+ c_faster_started_first.update!({state: Container::Locked})
+ c_faster_started_first.update!({state: Container::Running,
progress: 0.15})
- c_faster_started_second.update_attributes!({state: Container::Locked})
- c_faster_started_second.update_attributes!({state: Container::Running,
+ c_faster_started_second.update!({state: Container::Locked})
+ c_faster_started_second.update!({state: Container::Running,
progress: 0.2})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -383,16 +405,16 @@ class ContainerTest < ActiveSupport::TestCase
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
set_user_from_auth :dispatch1
- c_slower.update_attributes!({state: Container::Locked})
- c_slower.update_attributes!({state: Container::Running,
+ c_slower.update!({state: Container::Locked})
+ c_slower.update!({state: Container::Running,
progress: 0.1})
- c_faster_started_first.update_attributes!({state: Container::Locked})
- c_faster_started_first.update_attributes!({state: Container::Running,
+ c_faster_started_first.update!({state: Container::Locked})
+ c_faster_started_first.update!({state: Container::Running,
runtime_status: {'warning' => 'This is not an error'},
progress: 0.15})
- c_faster_started_second.update_attributes!({state: Container::Locked})
+ c_faster_started_second.update!({state: Container::Locked})
assert_equal 0, Container.where("runtime_status->'error' is not null").count
- c_faster_started_second.update_attributes!({state: Container::Running,
+ c_faster_started_second.update!({state: Container::Running,
runtime_status: {'error' => 'Something bad happened'},
progress: 0.2})
assert_equal 1, Container.where("runtime_status->'error' is not null").count
@@ -411,11 +433,11 @@ class ContainerTest < ActiveSupport::TestCase
# Confirm the 3 container UUIDs are different.
assert_equal 3, [c_low_priority.uuid, c_high_priority_older.uuid, c_high_priority_newer.uuid].uniq.length
set_user_from_auth :dispatch1
- c_low_priority.update_attributes!({state: Container::Locked,
+ c_low_priority.update!({state: Container::Locked,
priority: 1})
- c_high_priority_older.update_attributes!({state: Container::Locked,
+ c_high_priority_older.update!({state: Container::Locked,
priority: 2})
- c_high_priority_newer.update_attributes!({state: Container::Locked,
+ c_high_priority_newer.update!({state: Container::Locked,
priority: 2})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -429,14 +451,14 @@ class ContainerTest < ActiveSupport::TestCase
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_failed.uuid, c_running.uuid
set_user_from_auth :dispatch1
- c_failed.update_attributes!({state: Container::Locked})
- c_failed.update_attributes!({state: Container::Running})
- c_failed.update_attributes!({state: Container::Complete,
+ c_failed.update!({state: Container::Locked})
+ c_failed.update!({state: Container::Running})
+ c_failed.update!({state: Container::Complete,
exit_code: 42,
log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
output: 'ea10d51bcf88862dbcc36eb292017dfd+45'})
- c_running.update_attributes!({state: Container::Locked})
- c_running.update_attributes!({state: Container::Running,
+ c_running.update!({state: Container::Locked})
+ c_running.update!({state: Container::Running,
progress: 0.15})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -450,14 +472,14 @@ class ContainerTest < ActiveSupport::TestCase
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_completed.uuid, c_running.uuid
set_user_from_auth :dispatch1
- c_completed.update_attributes!({state: Container::Locked})
- c_completed.update_attributes!({state: Container::Running})
- c_completed.update_attributes!({state: Container::Complete,
+ c_completed.update!({state: Container::Locked})
+ c_completed.update!({state: Container::Running})
+ c_completed.update!({state: Container::Complete,
exit_code: 0,
log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'})
- c_running.update_attributes!({state: Container::Locked})
- c_running.update_attributes!({state: Container::Running,
+ c_running.update!({state: Container::Locked})
+ c_running.update!({state: Container::Running,
progress: 0.15})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -471,9 +493,9 @@ class ContainerTest < ActiveSupport::TestCase
c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_running.uuid, c_locked.uuid
set_user_from_auth :dispatch1
- c_locked.update_attributes!({state: Container::Locked})
- c_running.update_attributes!({state: Container::Locked})
- c_running.update_attributes!({state: Container::Running,
+ c_locked.update!({state: Container::Locked})
+ c_running.update!({state: Container::Locked})
+ c_running.update!({state: Container::Running,
progress: 0.15})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
@@ -487,7 +509,7 @@ class ContainerTest < ActiveSupport::TestCase
c_queued, _ = minimal_new(common_attrs.merge({use_existing: false}))
assert_not_equal c_queued.uuid, c_locked.uuid
set_user_from_auth :dispatch1
- c_locked.update_attributes!({state: Container::Locked})
+ c_locked.update!({state: Container::Locked})
reused = Container.find_reusable(common_attrs)
assert_not_nil reused
assert_equal reused.uuid, c_locked.uuid
@@ -498,14 +520,42 @@ class ContainerTest < ActiveSupport::TestCase
attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed"}})
c, _ = minimal_new(attrs)
set_user_from_auth :dispatch1
- c.update_attributes!({state: Container::Locked})
- c.update_attributes!({state: Container::Running})
- c.update_attributes!({state: Container::Complete,
+ c.update!({state: Container::Locked})
+ c.update!({state: Container::Running})
+ c.update!({state: Container::Complete,
exit_code: 33})
reused = Container.find_reusable(attrs)
assert_nil reused
end
+ [[false, false, true],
+ [false, true, true],
+ [true, false, false],
+ [true, true, true]
+ ].each do |c1_preemptible, c2_preemptible, should_reuse|
+ [[Container::Queued, 1],
+ [Container::Locked, 1],
+ [Container::Running, 0], # not cancelled yet, but obviously will be soon
+ ].each do |c1_state, c1_priority|
+ test "find_reusable for #{c2_preemptible ? '' : 'non-'}preemptible req should #{should_reuse ? '' : 'not'} reuse a #{c1_state} #{c1_preemptible ? '' : 'non-'}preemptible container with priority #{c1_priority}" do
+ configure_preemptible_instance_type
+ set_user_from_auth :active
+ c1_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"test" => name, "state" => c1_state}, scheduling_parameters: {"preemptible" => c1_preemptible}})
+ c1, _ = minimal_new(c1_attrs)
+ set_user_from_auth :dispatch1
+ c1.update!({state: Container::Locked}) if c1_state != Container::Queued
+ c1.update!({state: Container::Running, priority: c1_priority}) if c1_state == Container::Running
+ c2_attrs = c1_attrs.merge({scheduling_parameters: {"preemptible" => c2_preemptible}})
+ reused = Container.find_reusable(c2_attrs)
+ if should_reuse && c1_priority > 0
+ assert_not_nil reused
+ else
+ assert_nil reused
+ end
+ end
+ end
+ end
+
test "find_reusable with logging disabled" do
set_user_from_auth :active
Rails.logger.expects(:info).never
@@ -594,14 +644,14 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
# No cuda
no_cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
- runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_ram"=>268435456, "API" => false,
+ runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
"cuda" => {"device_count":0, "driver_version": "", "hardware_capability": ""}},})
c1, _ = minimal_new(no_cuda_attrs)
assert_equal Container::Queued, c1.state
# has cuda
cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"},
- runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_ram"=>268435456, "API" => false,
+ runtime_constraints: {"vcpus" => 1, "ram" => 1, "keep_cache_disk"=>0, "keep_cache_ram"=>268435456, "API" => false,
"cuda" => {"device_count":1, "driver_version": "11.0", "hardware_capability": "9.0"}},})
c2, _ = minimal_new(cuda_attrs)
assert_equal Container::Queued, c2.state
@@ -626,7 +676,7 @@ class ContainerTest < ActiveSupport::TestCase
{state: Container::Complete}]
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
check_illegal_modify c
check_bogus_states c
@@ -634,7 +684,7 @@ class ContainerTest < ActiveSupport::TestCase
check_illegal_updates c, [{state: Container::Queued}]
c.reload
- c.update_attributes! priority: 3
+ c.update! priority: 3
end
test "Lock and unlock" do
@@ -649,11 +699,11 @@ class ContainerTest < ActiveSupport::TestCase
c.lock
end
c.reload
- assert cr.update_attributes priority: 1
+ assert cr.update priority: 1
- refute c.update_attributes(state: Container::Running), "not locked"
+ refute c.update(state: Container::Running), "not locked"
c.reload
- refute c.update_attributes(state: Container::Complete), "not locked"
+ refute c.update(state: Container::Complete), "not locked"
c.reload
assert c.lock, show_errors(c)
@@ -667,13 +717,13 @@ class ContainerTest < ActiveSupport::TestCase
refute c.locked_by_uuid
refute c.auth_uuid
- refute c.update_attributes(state: Container::Running), "not locked"
+ refute c.update(state: Container::Running), "not locked"
c.reload
refute c.locked_by_uuid
refute c.auth_uuid
assert c.lock, show_errors(c)
- assert c.update_attributes(state: Container::Running), show_errors(c)
+ assert c.update(state: Container::Running), show_errors(c)
assert c.locked_by_uuid
assert c.auth_uuid
@@ -690,7 +740,7 @@ class ContainerTest < ActiveSupport::TestCase
end
c.reload
- assert c.update_attributes(state: Container::Complete), show_errors(c)
+ assert c.update(state: Container::Complete), show_errors(c)
refute c.locked_by_uuid
refute c.auth_uuid
@@ -750,7 +800,7 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :active
c, cr = minimal_new({container_count_max: 1})
set_user_from_auth :dispatch1
- assert c.update_attributes(state: Container::Cancelled), show_errors(c)
+ assert c.update(state: Container::Cancelled), show_errors(c)
check_no_change_from_cancelled c
cr.reload
assert_equal ContainerRequest::Final, cr.state
@@ -773,7 +823,7 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
assert c.lock, show_errors(c)
- assert c.update_attributes(state: Container::Cancelled), show_errors(c)
+ assert c.update(state: Container::Cancelled), show_errors(c)
check_no_change_from_cancelled c
end
@@ -793,7 +843,7 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
assert c.lock, show_errors(c)
- assert c.update_attributes(
+ assert c.update(
state: Container::Cancelled,
log: collections(:real_log_collection).portable_data_hash,
), show_errors(c)
@@ -805,8 +855,8 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
- c.update_attributes! state: Container::Cancelled
+ c.update! state: Container::Running
+ c.update! state: Container::Cancelled
check_no_change_from_cancelled c
end
@@ -856,16 +906,16 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :dispatch1
c.lock
if start_state != Container::Locked
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
if start_state != Container::Running
- c.update_attributes! state: start_state
+ c.update! state: start_state
end
end
end
assert_equal c.state, start_state
set_user_from_auth :active
assert_raises(ArvadosModel::PermissionDeniedError) do
- c.update_attributes! updates
+ c.update! updates
end
end
end
@@ -876,9 +926,9 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :dispatch1
c.lock
check_illegal_updates c, [{exit_code: 1}]
- c.update_attributes! state: Container::Running
- assert c.update_attributes(exit_code: 1)
- assert c.update_attributes(exit_code: 1, state: Container::Complete)
+ c.update! state: Container::Running
+ assert c.update(exit_code: 1)
+ assert c.update(exit_code: 1, state: Container::Complete)
end
test "locked_by_uuid can update log when locked/running, and output when running" do
@@ -897,8 +947,8 @@ class ContainerTest < ActiveSupport::TestCase
set_user_from_auth :dispatch1
c.lock
assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid
- c.update_attributes!(log: logpdh_time1)
- c.update_attributes!(state: Container::Running)
+ c.update!(log: logpdh_time1)
+ c.update!(state: Container::Running)
cr1.reload
cr2.reload
cr1log_uuid = cr1.log_uuid
@@ -909,17 +959,17 @@ class ContainerTest < ActiveSupport::TestCase
assert_not_equal logcoll.uuid, cr2log_uuid
assert_not_equal cr1log_uuid, cr2log_uuid
- logcoll.update_attributes!(manifest_text: logcoll.manifest_text + ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
+ logcoll.update!(manifest_text: logcoll.manifest_text + ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
logpdh_time2 = logcoll.portable_data_hash
- assert c.update_attributes(output: collections(:collection_owned_by_active).portable_data_hash)
- assert c.update_attributes(log: logpdh_time2)
- assert c.update_attributes(state: Container::Complete, log: logcoll.portable_data_hash)
+ assert c.update(output: collections(:collection_owned_by_active).portable_data_hash)
+ assert c.update(log: logpdh_time2)
+ assert c.update(state: Container::Complete, log: logcoll.portable_data_hash)
c.reload
assert_equal collections(:collection_owned_by_active).portable_data_hash, c.output
assert_equal logpdh_time2, c.log
- refute c.update_attributes(output: nil)
- refute c.update_attributes(log: nil)
+ refute c.update(output: nil)
+ refute c.update(log: nil)
cr1.reload
cr2.reload
assert_equal cr1log_uuid, cr1.log_uuid
@@ -942,7 +992,7 @@ class ContainerTest < ActiveSupport::TestCase
end
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
if tok == "runtime_token"
auth = ApiClientAuthorization.validate(token: c.runtime_token)
@@ -958,14 +1008,14 @@ class ContainerTest < ActiveSupport::TestCase
Thread.current[:user] = auth.user
end
- assert c.update_attributes(gateway_address: "127.0.0.1:9")
- assert c.update_attributes(output: collections(:collection_owned_by_active).portable_data_hash)
- assert c.update_attributes(runtime_status: {'warning' => 'something happened'})
- assert c.update_attributes(progress: 0.5)
- assert c.update_attributes(exit_code: 0)
- refute c.update_attributes(log: collections(:real_log_collection).portable_data_hash)
+ assert c.update(gateway_address: "127.0.0.1:9")
+ assert c.update(output: collections(:collection_owned_by_active).portable_data_hash)
+ assert c.update(runtime_status: {'warning' => 'something happened'})
+ assert c.update(progress: 0.5)
+ assert c.update(exit_code: 0)
+ refute c.update(log: collections(:real_log_collection).portable_data_hash)
c.reload
- assert c.update_attributes(state: Container::Complete, exit_code: 0)
+ assert c.update(state: Container::Complete, exit_code: 0)
end
end
@@ -974,13 +1024,13 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
assert_raises ActiveRecord::RecordInvalid do
- c.update_attributes! output: collections(:collection_not_readable_by_active).portable_data_hash
+ c.update! output: collections(:collection_not_readable_by_active).portable_data_hash
end
end
@@ -989,11 +1039,11 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
set_user_from_auth :running_to_be_deleted_container_auth
assert_raises(ArvadosModel::PermissionDeniedError) do
- c.update_attributes(output: collections(:foo_file).portable_data_hash)
+ c.update(output: collections(:foo_file).portable_data_hash)
end
end
@@ -1002,13 +1052,13 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jk')
assert output.is_trashed
- assert c.update_attributes output: output.portable_data_hash
- assert c.update_attributes! state: Container::Complete
+ assert c.update output: output.portable_data_hash
+ assert c.update! state: Container::Complete
end
test "not allowed to set trashed output that is not readable by current user" do
@@ -1016,7 +1066,7 @@ class ContainerTest < ActiveSupport::TestCase
c, _ = minimal_new
set_user_from_auth :dispatch1
c.lock
- c.update_attributes! state: Container::Running
+ c.update! state: Container::Running
output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jr')
@@ -1024,7 +1074,7 @@ class ContainerTest < ActiveSupport::TestCase
Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
assert_raises ActiveRecord::RecordInvalid do
- c.update_attributes! output: output.portable_data_hash
+ c.update! output: output.portable_data_hash
end
end
@@ -1047,12 +1097,12 @@ class ContainerTest < ActiveSupport::TestCase
container_count_max: 1, runtime_token: api_client_authorizations(:active).token)
set_user_from_auth :dispatch1
c.lock
- c.update_attributes!(state: Container::Running)
+ c.update!(state: Container::Running)
c.reload
assert c.secret_mounts.has_key?('/secret')
assert_equal api_client_authorizations(:active).token, c.runtime_token
- c.update_attributes!(final_attrs)
+ c.update!(final_attrs)
c.reload
assert_equal({}, c.secret_mounts)
assert_nil c.runtime_token
@@ -1062,4 +1112,266 @@ class ContainerTest < ActiveSupport::TestCase
assert_no_secrets_logged
end
end
+
+ def configure_preemptible_instance_type
+ Rails.configuration.InstanceTypes = ConfigLoader.to_OrderedOptions({
+ "a1.small.pre" => {
+ "Preemptible" => true,
+ "Price" => 0.1,
+ "ProviderType" => "a1.small",
+ "VCPUs" => 1,
+ "RAM" => 1000000000,
+ },
+ })
+ end
+
+ def vary_parameters(**kwargs)
+ # kwargs is a hash that maps parameters to an array of values.
+ # This function enumerates every possible hash where each key has one of
+ # the values from its array.
+ # The output keys are strings since that's what container hash attributes
+ # want.
+ # A nil value yields a hash without that key.
+ [[:_, nil]].product(
+ *kwargs.map { |(key, values)| [key.to_s].product(values) },
+ ).map { |param_pairs| Hash[param_pairs].compact }
+ end
+
+ def retry_with_scheduling_parameters(param_hashes)
+ set_user_from_auth :admin
+ containers = {}
+ requests = []
+ param_hashes.each do |scheduling_parameters|
+ container, request = minimal_new(scheduling_parameters: scheduling_parameters)
+ containers[container.uuid] = container
+ requests << request
+ end
+ refute(containers.empty?, "buggy test: no scheduling parameters enumerated")
+ assert_equal(1, containers.length)
+ _, container1 = containers.shift
+ container1.lock
+ container1.update!(state: Container::Cancelled)
+ container1.reload
+ request1 = requests.shift
+ request1.reload
+ assert_not_equal(container1.uuid, request1.container_uuid)
+ requests.each do |request|
+ request.reload
+ assert_equal(request1.container_uuid, request.container_uuid)
+ end
+ container2 = Container.find_by_uuid(request1.container_uuid)
+ assert_not_nil(container2)
+ return container2
+ end
+
+ preemptible_values = [true, false, nil]
+ preemptible_values.permutation(1).chain(
+ preemptible_values.product(preemptible_values),
+ preemptible_values.product(preemptible_values, preemptible_values),
+ ).each do |preemptible_a|
+ # If the first req has preemptible=true but a subsequent req
+ # doesn't, we want to avoid reusing the first container, so this
+ # test isn't appropriate.
+ next if preemptible_a[0] &&
+ ((preemptible_a.length > 1 && !preemptible_a[1]) ||
+ (preemptible_a.length > 2 && !preemptible_a[2]))
+ test "retry requests scheduled with preemptible=#{preemptible_a}" do
+ configure_preemptible_instance_type
+ param_hashes = vary_parameters(preemptible: preemptible_a)
+ container = retry_with_scheduling_parameters(param_hashes)
+ assert_equal(preemptible_a.all?,
+ container.scheduling_parameters["preemptible"] || false)
+ end
+ end
+
+ partition_values = [nil, [], ["alpha"], ["alpha", "bravo"], ["bravo", "charlie"]]
+ partition_values.permutation(1).chain(
+ partition_values.permutation(2),
+ ).each do |partitions_a|
+ test "retry requests scheduled with partitions=#{partitions_a}" do
+ param_hashes = vary_parameters(partitions: partitions_a)
+ container = retry_with_scheduling_parameters(param_hashes)
+ expected = if partitions_a.any? { |value| value.nil? or value.empty? }
+ []
+ else
+ partitions_a.flatten.uniq
+ end
+ actual = container.scheduling_parameters["partitions"] || []
+ assert_equal(expected.sort, actual.sort)
+ end
+ end
+
+ runtime_values = [nil, 0, 1, 2, 3]
+ runtime_values.permutation(1).chain(
+ runtime_values.permutation(2),
+ runtime_values.permutation(3),
+ ).each do |max_run_time_a|
+ test "retry requests scheduled with max_run_time=#{max_run_time_a}" do
+ param_hashes = vary_parameters(max_run_time: max_run_time_a)
+ container = retry_with_scheduling_parameters(param_hashes)
+ expected = if max_run_time_a.any? { |value| value.nil? or value == 0 }
+ 0
+ else
+ max_run_time_a.max
+ end
+ actual = container.scheduling_parameters["max_run_time"] || 0
+ assert_equal(expected, actual)
+ end
+ end
+
+ test "retry requests with multi-varied scheduling parameters" do
+ configure_preemptible_instance_type
+ param_hashes = [{
+ "partitions": ["alpha", "bravo"],
+ "preemptible": false,
+ "max_run_time": 10,
+ }, {
+ "partitions": ["alpha", "charlie"],
+ "max_run_time": 20,
+ }, {
+ "partitions": ["bravo", "charlie"],
+ "preemptible": true,
+ "max_run_time": 30,
+ }]
+ container = retry_with_scheduling_parameters(param_hashes)
+ actual = container.scheduling_parameters
+ assert_equal(["alpha", "bravo", "charlie"], actual["partitions"]&.sort)
+ assert_equal(false, actual["preemptible"] || false)
+ assert_equal(30, actual["max_run_time"])
+ end
+
+ test "retry requests with unset scheduling parameters" do
+ configure_preemptible_instance_type
+ param_hashes = vary_parameters(
+ preemptible: [nil, true],
+ partitions: [nil, ["alpha"]],
+ max_run_time: [nil, 5],
+ )
+ container = retry_with_scheduling_parameters(param_hashes)
+ actual = container.scheduling_parameters
+ assert_equal([], actual["partitions"] || [])
+ assert_equal(false, actual["preemptible"] || false)
+ assert_equal(0, actual["max_run_time"] || 0)
+ end
+
+ test "retry requests with default scheduling parameters" do
+ configure_preemptible_instance_type
+ param_hashes = vary_parameters(
+ preemptible: [false, true],
+ partitions: [[], ["bravo"]],
+ max_run_time: [0, 1],
+ )
+ container = retry_with_scheduling_parameters(param_hashes)
+ actual = container.scheduling_parameters
+ assert_equal([], actual["partitions"] || [])
+ assert_equal(false, actual["preemptible"] || false)
+ assert_equal(0, actual["max_run_time"] || 0)
+ end
+
+ def run_container(request_params, final_attrs)
+ final_attrs[:state] ||= Container::Complete
+ if final_attrs[:state] == Container::Complete
+ final_attrs[:exit_code] ||= 0
+ final_attrs[:log] ||= collections(:log_collection).portable_data_hash
+ final_attrs[:output] ||= collections(:multilevel_collection_1).portable_data_hash
+ end
+ container, request = minimal_new(request_params)
+ container.lock
+ container.update!(state: Container::Running)
+ container.update!(final_attrs)
+ return container, request
+ end
+
+ def check_reuse_with_variations(default_keep_cache_ram, vary_attr, start_value, variations)
+ container_params = REUSABLE_ATTRS_SLIM.merge(vary_attr => start_value)
+ orig_default = Rails.configuration.Containers.DefaultKeepCacheRAM
+ begin
+ Rails.configuration.Containers.DefaultKeepCacheRAM = default_keep_cache_ram
+ set_user_from_auth :admin
+ expected, _ = run_container(container_params, {})
+ variations.each do |variation|
+ full_variation = REUSABLE_ATTRS_SLIM[vary_attr].merge(variation)
+ parameters = REUSABLE_ATTRS_SLIM.merge(vary_attr => full_variation)
+ actual = Container.find_reusable(parameters)
+ assert_equal(expected.uuid, actual&.uuid,
+ "request with #{vary_attr}=#{variation} did not reuse container")
+ end
+ ensure
+ Rails.configuration.Containers.DefaultKeepCacheRAM = orig_default
+ end
+ end
+
+ # Test that we can reuse a container with a known keep_cache_ram constraint,
+ # no matter what keep_cache_* constraints the new request uses.
+ [0, 2 << 30, 4 << 30].product(
+ [0, 1],
+ [true, false],
+ ).each do |(default_keep_cache_ram, multiplier, keep_disk_constraint)|
+ test "reuse request with DefaultKeepCacheRAM=#{default_keep_cache_ram}, keep_cache_ram*=#{multiplier}, keep_cache_disk=#{keep_disk_constraint}" do
+ runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(
+ "keep_cache_ram" => default_keep_cache_ram * multiplier,
+ )
+ if not keep_disk_constraint
+ # Simulate a container that predates keep_cache_disk by deleting
+ # the constraint entirely.
+ runtime_constraints.delete("keep_cache_disk")
+ end
+ # Important values are:
+ # * 0
+ # * 2GiB, the minimum default keep_cache_disk
+ # * 8GiB, the default keep_cache_disk based on container ram
+ # * 32GiB, the maximum default keep_cache_disk
+ # Check these values and values in between.
+ vary_values = [0, 1, 2, 6, 8, 10, 32, 33].map { |v| v << 30 }.to_a
+ variations = vary_parameters(keep_cache_ram: vary_values)
+ .chain(vary_parameters(keep_cache_disk: vary_values))
+ check_reuse_with_variations(
+ default_keep_cache_ram,
+ :runtime_constraints,
+ runtime_constraints,
+ variations,
+ )
+ end
+ end
+
+ # Test that we can reuse a container with a known keep_cache_disk constraint,
+ # no matter what keep_cache_* constraints the new request uses.
+ # keep_cache_disk values are the important values discussed in the test above.
+ [0, 2 << 30, 4 << 30]
+ .product([0, 2 << 30, 8 << 30, 32 << 30])
+ .each do |(default_keep_cache_ram, keep_cache_disk)|
+ test "reuse request with DefaultKeepCacheRAM=#{default_keep_cache_ram} and keep_cache_disk=#{keep_cache_disk}" do
+ runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(
+ "keep_cache_disk" => keep_cache_disk,
+ )
+ vary_values = [0, 1, 2, 6, 8, 10, 32, 33].map { |v| v << 30 }.to_a
+ variations = vary_parameters(keep_cache_ram: vary_values)
+ .chain(vary_parameters(keep_cache_disk: vary_values))
+ check_reuse_with_variations(
+ default_keep_cache_ram,
+ :runtime_constraints,
+ runtime_constraints,
+ variations,
+ )
+ end
+ end
+
+ # Test that a container request can reuse a container with an exactly
+ # matching keep_cache_* constraint, no matter what the defaults.
+ [0, 2 << 30, 4 << 30].product(
+ ["keep_cache_disk", "keep_cache_ram"],
+ [135790, 13 << 30, 135 << 30],
+ ).each do |(default_keep_cache_ram, constraint_key, constraint_value)|
+ test "reuse request with #{constraint_key}=#{constraint_value} and DefaultKeepCacheRAM=#{default_keep_cache_ram}" do
+ runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(
+ constraint_key => constraint_value,
+ )
+ check_reuse_with_variations(
+ default_keep_cache_ram,
+ :runtime_constraints,
+ runtime_constraints,
+ [runtime_constraints],
+ )
+ end
+ end
end
diff --git a/services/api/test/unit/create_superuser_token_test.rb b/services/api/test/unit/create_superuser_token_test.rb
index 3c6dcbdbbc..86ba78cb99 100644
--- a/services/api/test/unit/create_superuser_token_test.rb
+++ b/services/api/test/unit/create_superuser_token_test.rb
@@ -54,7 +54,7 @@ class CreateSuperUserTokenTest < ActiveSupport::TestCase
apiClientAuth = ApiClientAuthorization.where(api_token: 'atesttoken').first
refute_nil apiClientAuth
Thread.current[:user] = users(:admin)
- apiClientAuth.update_attributes expires_at: '2000-10-10'
+ apiClientAuth.update expires_at: '2000-10-10'
token2 = create_superuser_token
assert_not_nil token2
diff --git a/services/api/test/unit/group_test.rb b/services/api/test/unit/group_test.rb
index a3bcd4e356..36f42006ff 100644
--- a/services/api/test/unit/group_test.rb
+++ b/services/api/test/unit/group_test.rb
@@ -82,7 +82,7 @@ class GroupTest < ActiveSupport::TestCase
set_user_from_auth :active_trustedclient
g = Group.create!(name: "foo", group_class: "role")
assert_raises(ActiveRecord::RecordInvalid) do
- g.update_attributes!(group_class: "project")
+ g.update!(group_class: "project")
end
end
@@ -95,7 +95,7 @@ class GroupTest < ActiveSupport::TestCase
c = Collection.create!(name: "bzzz124")
assert_raises(ArvadosModel::PermissionDeniedError) do
- c.update_attributes!(owner_uuid: role.uuid)
+ c.update!(owner_uuid: role.uuid)
end
end
@@ -336,7 +336,7 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# Cannot set frozen_by_uuid to a different user
assert_raises do
- proj.update_attributes!(frozen_by_uuid: users(:spectator).uuid)
+ proj.update!(frozen_by_uuid: users(:spectator).uuid)
end
proj.reload
@@ -348,7 +348,7 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# First confirm we have write permission
assert Collection.create(name: 'bar', owner_uuid: proj.uuid)
assert_raises(ArvadosModel::PermissionDeniedError) do
- proj.update_attributes!(frozen_by_uuid: users(:spectator).uuid)
+ proj.update!(frozen_by_uuid: users(:spectator).uuid)
end
end
proj.reload
@@ -356,12 +356,12 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# Cannot set frozen_by_uuid without description (if so configured)
Rails.configuration.API.FreezeProjectRequiresDescription = true
err = assert_raises do
- proj.update_attributes!(frozen_by_uuid: users(:active).uuid)
+ proj.update!(frozen_by_uuid: users(:active).uuid)
end
assert_match /can only be set if description is non-empty/, err.inspect
proj.reload
err = assert_raises do
- proj.update_attributes!(frozen_by_uuid: users(:active).uuid, description: '')
+ proj.update!(frozen_by_uuid: users(:active).uuid, description: '')
end
assert_match /can only be set if description is non-empty/, err.inspect
proj.reload
@@ -369,7 +369,7 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# Cannot set frozen_by_uuid without properties (if so configured)
Rails.configuration.API.FreezeProjectRequiresProperties['frobity'] = true
err = assert_raises do
- proj.update_attributes!(
+ proj.update!(
frozen_by_uuid: users(:active).uuid,
description: 'ready to freeze')
end
@@ -379,20 +379,20 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# Cannot set frozen_by_uuid while project or its parent is
# trashed
[parent, proj].each do |trashed|
- trashed.update_attributes!(trash_at: db_current_time)
+ trashed.update!(trash_at: db_current_time)
err = assert_raises do
- proj.update_attributes!(
+ proj.update!(
frozen_by_uuid: users(:active).uuid,
description: 'ready to freeze',
properties: {'frobity' => 'bar baz'})
end
assert_match /cannot be set on a trashed project/, err.inspect
proj.reload
- trashed.update_attributes!(trash_at: nil)
+ trashed.update!(trash_at: nil)
end
# Can set frozen_by_uuid if all conditions are met
- ok = proj.update_attributes(
+ ok = proj.update(
frozen_by_uuid: users(:active).uuid,
description: 'ready to freeze',
properties: {'frobity' => 'bar baz'})
@@ -404,7 +404,7 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# its descendants
[proj, proj_inner].each do |frozen|
assert_raises do
- collections(:collection_owned_by_active).update_attributes!(owner_uuid: frozen.uuid)
+ collections(:collection_owned_by_active).update!(owner_uuid: frozen.uuid)
end
assert_raises do
Collection.create!(owner_uuid: frozen.uuid, name: 'inside-frozen-project')
@@ -427,31 +427,31 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# trash, or delete the project or anything beneath it
[proj, proj_inner, coll].each do |frozen|
assert_raises(StandardError, "should reject rename of #{frozen.uuid} (#{frozen.name}) with parent #{frozen.owner_uuid}") do
- frozen.update_attributes!(name: 'foo2')
+ frozen.update!(name: 'foo2')
end
frozen.reload
if frozen.is_a?(Collection)
assert_raises(StandardError, "should reject manifest change of #{frozen.uuid}") do
- frozen.update_attributes!(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n")
+ frozen.update!(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n")
end
else
assert_raises(StandardError, "should reject moving a project into #{frozen.uuid}") do
- groups(:private).update_attributes!(owner_uuid: frozen.uuid)
+ groups(:private).update!(owner_uuid: frozen.uuid)
end
end
frozen.reload
assert_raises(StandardError, "should reject moving #{frozen.uuid} to a different parent project") do
- frozen.update_attributes!(owner_uuid: groups(:private).uuid)
+ frozen.update!(owner_uuid: groups(:private).uuid)
end
frozen.reload
assert_raises(StandardError, "should reject setting trash_at of #{frozen.uuid}") do
- frozen.update_attributes!(trash_at: db_current_time)
+ frozen.update!(trash_at: db_current_time)
end
frozen.reload
assert_raises(StandardError, "should reject setting delete_at of #{frozen.uuid}") do
- frozen.update_attributes!(delete_at: db_current_time)
+ frozen.update!(delete_at: db_current_time)
end
frozen.reload
assert_raises(StandardError, "should reject delete of #{frozen.uuid}") do
@@ -470,35 +470,35 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
# First confirm we have write permission on the parent project
assert Collection.create(name: 'bar', owner_uuid: parent.uuid)
assert_raises(ArvadosModel::PermissionDeniedError) do
- proj.update_attributes!(frozen_by_uuid: nil)
+ proj.update!(frozen_by_uuid: nil)
end
end
proj.reload
# User with manage permission can unfreeze, then create items
# inside it and its children
- assert proj.update_attributes(frozen_by_uuid: nil)
+ assert proj.update(frozen_by_uuid: nil)
assert Collection.create!(owner_uuid: proj.uuid, name: 'inside-unfrozen-project')
assert Collection.create!(owner_uuid: proj_inner.uuid, name: 'inside-inner-unfrozen-project')
# Re-freeze, and reconfigure so only admins can unfreeze.
- assert proj.update_attributes(frozen_by_uuid: users(:active).uuid)
+ assert proj.update(frozen_by_uuid: users(:active).uuid)
Rails.configuration.API.UnfreezeProjectRequiresAdmin = true
# Owner cannot unfreeze, because not admin.
err = assert_raises do
- proj.update_attributes!(frozen_by_uuid: nil)
+ proj.update!(frozen_by_uuid: nil)
end
assert_match /can only be changed by an admin user, once set/, err.inspect
proj.reload
# Cannot trash or delete a frozen project's ancestor
assert_raises(StandardError, "should not be able to set trash_at on parent of frozen project") do
- parent.update_attributes!(trash_at: db_current_time)
+ parent.update!(trash_at: db_current_time)
end
parent.reload
assert_raises(StandardError, "should not be able to set delete_at on parent of frozen project") do
- parent.update_attributes!(delete_at: db_current_time)
+ parent.update!(delete_at: db_current_time)
end
parent.reload
assert_nil parent.frozen_by_uuid
@@ -506,13 +506,13 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
act_as_user users(:admin) do
# Even admin cannot change frozen_by_uuid to someone else's UUID.
err = assert_raises do
- proj.update_attributes!(frozen_by_uuid: users(:project_viewer).uuid)
+ proj.update!(frozen_by_uuid: users(:project_viewer).uuid)
end
assert_match /can only be set to the current user's UUID/, err.inspect
proj.reload
# Admin can unfreeze.
- assert proj.update_attributes(frozen_by_uuid: nil), proj.errors.messages
+ assert proj.update(frozen_by_uuid: nil), proj.errors.messages
end
# Cannot freeze a project if it contains container requests in
@@ -521,15 +521,36 @@ update links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'
creq_uncommitted = ContainerRequest.create!(test_cr_attrs.merge(owner_uuid: proj_inner.uuid))
creq_committed = ContainerRequest.create!(test_cr_attrs.merge(owner_uuid: proj_inner.uuid, state: 'Committed'))
err = assert_raises do
- proj.update_attributes!(frozen_by_uuid: users(:active).uuid)
+ proj.update!(frozen_by_uuid: users(:active).uuid)
end
assert_match /container request zzzzz-xvhdp-.* with state = Committed/, err.inspect
proj.reload
# Can freeze once all container requests are in Uncommitted or
# Final state
- creq_committed.update_attributes!(state: ContainerRequest::Final)
- assert proj.update_attributes(frozen_by_uuid: users(:active).uuid)
+ creq_committed.update!(state: ContainerRequest::Final)
+ assert proj.update(frozen_by_uuid: users(:active).uuid)
+ end
+ end
+
+ [
+ [false, :admin, true],
+ [false, :active, false],
+ [true, :admin, true],
+ [true, :active, true],
+ [true, :inactive, false],
+ ].each do |conf, user, allowed|
+ test "config.Users.CanCreateRoleGroups conf=#{conf}, user=#{user}" do
+ Rails.configuration.Users.CanCreateRoleGroups = conf
+ act_as_user users(user) do
+ if allowed
+ Group.create!(name: 'admin-created', group_class: 'role')
+ else
+ assert_raises(ArvadosModel::PermissionDeniedError) do
+ Group.create!(name: 'user-created', group_class: 'role')
+ end
+ end
+ end
end
end
end
diff --git a/services/api/test/unit/link_test.rb b/services/api/test/unit/link_test.rb
index c7d21bdc4d..b9806486ad 100644
--- a/services/api/test/unit/link_test.rb
+++ b/services/api/test/unit/link_test.rb
@@ -93,4 +93,42 @@ class LinkTest < ActiveSupport::TestCase
refute new_active_link_valid?(tail_uuid: groups(:public).uuid,
head_uuid: collections(:w_a_z_file_version_1).uuid)
end
+
+ def create_overlapping_permissions(names=[], attrs={})
+ names.map do |name|
+ link = Link.create!({
+ link_class: "tmp",
+ tail_uuid: users(:active).uuid,
+ head_uuid: collections(:baz_file).uuid,
+ name: name,
+ }.merge(attrs).merge({name: name}))
+ ActiveRecord::Base.connection.execute "update links set link_class='permission' where uuid='#{link.uuid}'"
+ link.uuid
+ end
+ end
+
+ test "updating permission causes any conflicting links to be deleted" do
+ link1, link2 = create_overlapping_permissions(['can_read', 'can_manage'])
+ Link.find_by_uuid(link2).update!(name: 'can_write')
+ assert_empty Link.where(uuid: link1)
+ end
+
+ test "deleting permission causes any conflicting links to be deleted" do
+ rlink, wlink = create_overlapping_permissions(['can_read', 'can_write'])
+ Link.find_by_uuid(wlink).destroy
+ assert_empty Link.where(uuid: rlink)
+ end
+
+ test "updating login permission causes any conflicting links to be deleted" do
+ link1, link2 = create_overlapping_permissions(['can_login', 'can_login'], {properties: {username: 'foo1'}})
+ Link.find_by_uuid(link1).update!(properties: {'username' => 'foo2'})
+ Link.find_by_uuid(link2).update!(properties: {'username' => 'foo2'})
+ assert_empty Link.where(uuid: link1)
+ end
+
+ test "deleting login permission causes any conflicting links to be deleted" do
+ link1, link2 = create_overlapping_permissions(['can_login', 'can_login'], {properties: {username: 'foo1'}})
+ Link.find_by_uuid(link1).destroy
+ assert_empty Link.where(uuid: link2)
+ end
end
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
index 66c8c8d923..d3a1b618d5 100644
--- a/services/api/test/unit/log_test.rb
+++ b/services/api/test/unit/log_test.rb
@@ -319,7 +319,7 @@ class LogTest < ActiveSupport::TestCase
assert_logged(coll, :create) do |props|
assert_equal(txt, props['new_attributes']['manifest_text'])
end
- coll.update_attributes!(name: "testing")
+ coll.update!(name: "testing")
assert_logged(coll, :update) do |props|
assert_equal(txt, props['old_attributes']['manifest_text'])
assert_equal(txt, props['new_attributes']['manifest_text'])
diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb
deleted file mode 100644
index 9fa3febe1e..0000000000
--- a/services/api/test/unit/node_test.rb
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'tmpdir'
-require 'tempfile'
-
-class NodeTest < ActiveSupport::TestCase
- def ping_node(node_name, ping_data)
- set_user_from_auth :admin
- node = nodes(node_name)
- node.ping({ping_secret: node.info['ping_secret'],
- ip: node.ip_address}.merge(ping_data))
- node
- end
-
- test "pinging a node can add and update stats" do
- node = ping_node(:idle, {total_cpu_cores: '12', total_ram_mb: '512'})
- assert_equal(12, node.properties['total_cpu_cores'])
- assert_equal(512, node.properties['total_ram_mb'])
- end
-
- test "stats disappear if not in a ping" do
- node = ping_node(:idle, {total_ram_mb: '256'})
- refute_includes(node.properties, 'total_cpu_cores')
- assert_equal(256, node.properties['total_ram_mb'])
- end
-
- test "worker state is down for node with no slot" do
- node = nodes(:was_idle_now_down)
- assert_nil node.slot_number, "fixture is not what I expected"
- assert_equal 'down', node.crunch_worker_state, "wrong worker state"
- end
-
- test "dns_server_conf_template" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
- conffile = Rails.root.join 'tmp', 'compute65535.conf'
- File.unlink conffile rescue nil
- assert Node.dns_server_update 'compute65535', '127.0.0.1'
- assert_match(/\"1\.0\.0\.127\.in-addr\.arpa\. IN PTR compute65535\.zzzzz\.arvadosapi\.com\"/, IO.read(conffile))
- File.unlink conffile
- end
-
- test "dns_server_restart_command" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
- Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
- restartfile = Rails.root.join 'tmp', 'restart.txt'
- File.unlink restartfile rescue nil
- assert Node.dns_server_update 'compute65535', '127.0.0.127'
- assert_equal "foobar\n", IO.read(restartfile)
- File.unlink restartfile
- end
-
- test "dns_server_restart_command fail" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp', 'bogusdir'
- Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
- refute Node.dns_server_update 'compute65535', '127.0.0.127'
- end
-
- test "dns_server_update_command with valid command" do
- testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt')
- Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand =
- ('echo -n "%{hostname} == %{ip_address}" >' +
- testfile.to_s.shellescape)
- assert Node.dns_server_update 'compute65535', '127.0.0.1'
- assert_equal 'compute65535 == 127.0.0.1', IO.read(testfile)
- File.unlink testfile
- end
-
- test "dns_server_update_command with failing command" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = 'false %{hostname}'
- refute Node.dns_server_update 'compute65535', '127.0.0.1'
- end
-
- test "dns update with no commands/dirs configured" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = ""
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = ""
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = 'ignored!'
- Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'ignored!'
- assert Node.dns_server_update 'compute65535', '127.0.0.127'
- end
-
- test "don't leave temp files behind if there's an error writing them" do
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
- Tempfile.any_instance.stubs(:puts).raises(IOError)
- Dir.mktmpdir do |tmpdir|
- Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = tmpdir
- refute Node.dns_server_update 'compute65535', '127.0.0.127'
- assert_empty Dir.entries(tmpdir).select{|f| File.file? f}
- end
- end
-
- test "ping new node with no hostname and default config" do
- node = ping_node(:new_with_no_hostname, {})
- slot_number = node.slot_number
- refute_nil slot_number
- assert_equal("compute#{slot_number}", node.hostname)
- end
-
- test "ping new node with no hostname and no config" do
- Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
- node = ping_node(:new_with_no_hostname, {})
- refute_nil node.slot_number
- assert_nil node.hostname
- end
-
- test "ping new node with zero padding config" do
- Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%04d'
- node = ping_node(:new_with_no_hostname, {})
- slot_number = node.slot_number
- refute_nil slot_number
- assert_equal("compute000#{slot_number}", node.hostname)
- end
-
- test "ping node with hostname and config and expect hostname unchanged" do
- node = ping_node(:new_with_custom_hostname, {})
- assert_equal(23, node.slot_number)
- assert_equal("custom1", node.hostname)
- end
-
- test "ping node with hostname and no config and expect hostname unchanged" do
- Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
- node = ping_node(:new_with_custom_hostname, {})
- assert_equal(23, node.slot_number)
- assert_equal("custom1", node.hostname)
- end
-
- # Ping two nodes: one without a hostname and the other with a hostname.
- # Verify that the first one gets a hostname and second one is unchanged.
- test "ping two nodes one with no hostname and one with hostname and check hostnames" do
- # ping node with no hostname and expect it set with config format
- node = ping_node(:new_with_no_hostname, {})
- refute_nil node.slot_number
- assert_equal "compute#{node.slot_number}", node.hostname
-
- # ping node with a hostname and expect it to be unchanged
- node2 = ping_node(:new_with_custom_hostname, {})
- refute_nil node2.slot_number
- assert_equal "custom1", node2.hostname
- end
-
- test "update dns when hostname and ip_address are cleared" do
- act_as_system_user do
- node = ping_node(:new_with_custom_hostname, {})
- Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
- node.update_attributes(hostname: nil, ip_address: nil)
- end
- end
-
- test "update dns when hostname changes" do
- act_as_system_user do
- node = ping_node(:new_with_custom_hostname, {})
-
- Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
- Node.expects(:dns_server_update).with('foo0', node.ip_address)
- node.update_attributes!(hostname: 'foo0')
-
- Node.expects(:dns_server_update).with('foo0', Node::UNUSED_NODE_IP)
- node.update_attributes!(hostname: nil, ip_address: nil)
-
- Node.expects(:dns_server_update).with('foo0', '10.11.12.13')
- node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.13')
-
- Node.expects(:dns_server_update).with('foo0', '10.11.12.14')
- node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.14')
- end
- end
-
- test 'newest ping wins IP address conflict' do
- act_as_system_user do
- n1, n2 = Node.create!, Node.create!
-
- n1.ping(ip: '10.5.5.5', ping_secret: n1.info['ping_secret'])
- n1.reload
-
- Node.expects(:dns_server_update).with(n1.hostname, Node::UNUSED_NODE_IP)
- Node.expects(:dns_server_update).with(Not(equals(n1.hostname)), '10.5.5.5')
- n2.ping(ip: '10.5.5.5', ping_secret: n2.info['ping_secret'])
-
- n1.reload
- n2.reload
- assert_nil n1.ip_address
- assert_equal '10.5.5.5', n2.ip_address
-
- Node.expects(:dns_server_update).with(n2.hostname, Node::UNUSED_NODE_IP)
- Node.expects(:dns_server_update).with(n1.hostname, '10.5.5.5')
- n1.ping(ip: '10.5.5.5', ping_secret: n1.info['ping_secret'])
-
- n1.reload
- n2.reload
- assert_nil n2.ip_address
- assert_equal '10.5.5.5', n1.ip_address
- end
- end
-
- test 'run out of slots' do
- Rails.configuration.Containers.MaxComputeVMs = 3
- act_as_system_user do
- Node.destroy_all
- (1..4).each do |i|
- n = Node.create!
- args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] }
- if i <= Rails.configuration.Containers.MaxComputeVMs
- n.ping(args)
- else
- assert_raises do
- n.ping(args)
- end
- end
- end
- end
- end
-end
diff --git a/services/api/test/unit/owner_test.rb b/services/api/test/unit/owner_test.rb
index aa0ac5f361..1c1bd93b81 100644
--- a/services/api/test/unit/owner_test.rb
+++ b/services/api/test/unit/owner_test.rb
@@ -63,7 +63,7 @@ class OwnerTest < ActiveSupport::TestCase
assert(Specimen.where(uuid: i.uuid).any?,
"new item should really be in DB")
- assert(i.update_attributes(owner_uuid: new_o.uuid),
+ assert(i.update(owner_uuid: new_o.uuid),
"should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
end
end
@@ -92,7 +92,7 @@ class OwnerTest < ActiveSupport::TestCase
"new #{o_class} should really be in DB")
old_uuid = o.uuid
new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
- assert(o.update_attributes(uuid: new_uuid),
+ assert(o.update(uuid: new_uuid),
"should change #{o_class} uuid from #{old_uuid} to #{new_uuid}")
assert_equal(false, o_class.where(uuid: old_uuid).any?,
"#{old_uuid} should disappear when renamed to #{new_uuid}")
@@ -118,7 +118,7 @@ class OwnerTest < ActiveSupport::TestCase
assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
"need something to be owned by #{o.uuid} for this test")
new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
- assert(!o.update_attributes(uuid: new_uuid),
+ assert(!o.update(uuid: new_uuid),
"should not change uuid of #{ofixt} that owns objects")
end
end
@@ -126,7 +126,7 @@ class OwnerTest < ActiveSupport::TestCase
test "delete User that owns self" do
o = User.create!
assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
- assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+ assert_equal(true, o.update(owner_uuid: o.uuid),
"setting owner to self should work")
skip_check_permissions_against_full_refresh do
diff --git a/services/api/test/unit/permission_test.rb b/services/api/test/unit/permission_test.rb
index efc43dfde5..14c810d81a 100644
--- a/services/api/test/unit/permission_test.rb
+++ b/services/api/test/unit/permission_test.rb
@@ -84,7 +84,7 @@ class PermissionTest < ActiveSupport::TestCase
assert users(:active).can?(write: ob)
assert users(:active).can?(read: ob)
- l1.update_attributes!(name: 'can_read')
+ l1.update!(name: 'can_read')
assert !users(:active).can?(write: ob)
assert users(:active).can?(read: ob)
@@ -293,7 +293,7 @@ class PermissionTest < ActiveSupport::TestCase
"manager saw the minion's private stuff")
assert_raises(ArvadosModel::PermissionDeniedError,
"manager could update minion's private stuff") do
- minions_specimen.update_attributes(properties: {'x' => 'y'})
+ minions_specimen.update(properties: {'x' => 'y'})
end
end
@@ -310,7 +310,7 @@ class PermissionTest < ActiveSupport::TestCase
.where(uuid: minions_specimen.uuid),
"manager could not find minion's specimen by uuid")
assert_equal(true,
- minions_specimen.update_attributes(properties: {'x' => 'y'}),
+ minions_specimen.update(properties: {'x' => 'y'}),
"manager could not update minion's specimen object")
end
end
@@ -355,17 +355,17 @@ class PermissionTest < ActiveSupport::TestCase
"OTHER can see #{u.first_name} in the user list")
act_as_user u do
assert_raises ArvadosModel::PermissionDeniedError, "wrote without perm" do
- other.update_attributes!(prefs: {'pwned' => true})
+ other.update!(prefs: {'pwned' => true})
end
- assert_equal(true, u.update_attributes!(prefs: {'thisisme' => true}),
+ assert_equal(true, u.update!(prefs: {'thisisme' => true}),
"#{u.first_name} can't update its own prefs")
end
act_as_user other do
assert_raises(ArvadosModel::PermissionDeniedError,
"OTHER wrote #{u.first_name} without perm") do
- u.update_attributes!(prefs: {'pwned' => true})
+ u.update!(prefs: {'pwned' => true})
end
- assert_equal(true, other.update_attributes!(prefs: {'thisisme' => true}),
+ assert_equal(true, other.update!(prefs: {'thisisme' => true}),
"OTHER can't update its own prefs")
end
end
@@ -382,7 +382,7 @@ class PermissionTest < ActiveSupport::TestCase
set_user_from_auth :rominiadmin
ob = Collection.create!
assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable user" do
- ob.update_attributes!(owner_uuid: users(:active).uuid)
+ ob.update!(owner_uuid: users(:active).uuid)
end
end
@@ -397,7 +397,7 @@ class PermissionTest < ActiveSupport::TestCase
set_user_from_auth :rominiadmin
ob = Collection.create!
assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable group" do
- ob.update_attributes!(owner_uuid: groups(:aproject).uuid)
+ ob.update!(owner_uuid: groups(:aproject).uuid)
end
end
@@ -529,7 +529,13 @@ class PermissionTest < ActiveSupport::TestCase
assert users(:active).can?(write: col.uuid)
assert users(:active).can?(manage: col.uuid)
- l3.destroy!
+ # Creating l3 should have automatically deleted l1 and upgraded to
+ # the max permission of {l1, l3}, i.e., can_manage (see #18693) so
+ # there should be no can_read link now.
+ refute Link.where(tail_uuid: l3.tail_uuid,
+ head_uuid: l3.head_uuid,
+ link_class: 'permission',
+ name: 'can_read').any?
assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first
assert users(:active).can?(read: col.uuid)
@@ -575,7 +581,13 @@ class PermissionTest < ActiveSupport::TestCase
assert users(:active).can?(write: prj.uuid)
assert users(:active).can?(manage: prj.uuid)
- l3.destroy!
+ # Creating l3 should have automatically deleted l0 and upgraded to
+ # the max permission of {l0, l3}, i.e., can_manage (see #18693) so
+ # there should be no can_read link now.
+ refute Link.where(tail_uuid: l3.tail_uuid,
+ head_uuid: l3.head_uuid,
+ link_class: 'permission',
+ name: 'can_read').any?
assert Group.readable_by(users(:active)).where(uuid: prj.uuid).first
assert users(:active).can?(read: prj.uuid)
diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb
index cb562ef977..674a34ffd8 100644
--- a/services/api/test/unit/repository_test.rb
+++ b/services/api/test/unit/repository_test.rb
@@ -263,7 +263,7 @@ class RepositoryTest < ActiveSupport::TestCase
test "non-admin can rename own repo" do
act_as_user users(:active) do
- assert repositories(:foo).update_attributes(name: 'active/foo12345')
+ assert repositories(:foo).update(name: 'active/foo12345')
end
end
diff --git a/services/api/test/unit/update_priority_test.rb b/services/api/test/unit/update_priority_test.rb
deleted file mode 100644
index c1f60d91d0..0000000000
--- a/services/api/test/unit/update_priority_test.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-require 'test_helper'
-require 'update_priority'
-
-class UpdatePriorityTest < ActiveSupport::TestCase
- test 'priority 0 but should be >0' do
- uuid = containers(:running).uuid
- ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
- assert_equal 0, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority(nolock: true)
- assert_operator 0, :<, Container.find_by_uuid(uuid).priority
-
- uuid = containers(:queued).uuid
- ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
- assert_equal 0, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority(nolock: true)
- assert_operator 0, :<, Container.find_by_uuid(uuid).priority
- end
-
- test 'priority>0 but should be 0' do
- uuid = containers(:running).uuid
- ActiveRecord::Base.connection.exec_query('DELETE FROM container_requests WHERE container_uuid=$1', 'test-setup', [[nil, uuid]])
- assert_operator 0, :<, Container.find_by_uuid(uuid).priority
- UpdatePriority.update_priority(nolock: true)
- assert_equal 0, Container.find_by_uuid(uuid).priority
- end
-end
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index 9a0e1dbf9c..810e5b45ec 100644
--- a/services/api/test/unit/user_test.rb
+++ b/services/api/test/unit/user_test.rb
@@ -153,12 +153,12 @@ class UserTest < ActiveSupport::TestCase
assert_equal("active/foo", repositories(:foo).name)
end
- [[false, 'foo@example.com', true, nil],
- [false, 'bar@example.com', nil, true],
- [true, 'foo@example.com', true, nil],
+ [[false, 'foo@example.com', true, false],
+ [false, 'bar@example.com', false, true],
+ [true, 'foo@example.com', true, false],
[true, 'bar@example.com', true, true],
- [false, '', nil, nil],
- [true, '', true, nil]
+ [false, '', false, false],
+ [true, '', true, false]
].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
# In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
@@ -166,7 +166,7 @@ class UserTest < ActiveSupport::TestCase
if auto_admin_first_user_config
# This test requires no admin users exist (except for the system user)
act_as_system_user do
- users(:admin).update_attributes!(is_admin: false)
+ users(:admin).update!(is_admin: false)
end
@all_users = User.where("uuid not like '%-000000000000000'").where(:is_admin => true)
assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)"
@@ -347,10 +347,12 @@ class UserTest < ActiveSupport::TestCase
test "create new user with notifications" do
set_user_from_auth :admin
+ Rails.configuration.Users.AutoSetupNewUsers = false
+
create_user_and_verify_setup_and_notifications true, active_notify_list, inactive_notify_list, nil, nil
create_user_and_verify_setup_and_notifications true, active_notify_list, empty_notify_list, nil, nil
create_user_and_verify_setup_and_notifications true, empty_notify_list, empty_notify_list, nil, nil
- create_user_and_verify_setup_and_notifications false, active_notify_list, inactive_notify_list, nil, nil
+ create_user_and_verify_setup_and_notifications false, empty_notify_list, inactive_notify_list, nil, nil
create_user_and_verify_setup_and_notifications false, empty_notify_list, inactive_notify_list, nil, nil
create_user_and_verify_setup_and_notifications false, empty_notify_list, empty_notify_list, nil, nil
end
@@ -379,13 +381,13 @@ class UserTest < ActiveSupport::TestCase
[false, empty_notify_list, empty_notify_list, "arvados@example.com", false, false, "arvados2"],
[true, active_notify_list, inactive_notify_list, "arvados@example.com", false, false, "arvados2"],
[true, active_notify_list, inactive_notify_list, "root@example.com", true, false, "root2"],
- [false, active_notify_list, inactive_notify_list, "root@example.com", true, false, "root2"],
+ [false, active_notify_list, empty_notify_list, "root@example.com", true, false, "root2"],
[true, active_notify_list, inactive_notify_list, "roo_t@example.com", false, true, "root2"],
[false, empty_notify_list, empty_notify_list, "^^incorrect_format@example.com", true, true, "incorrectformat"],
[true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
[true, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
- [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
- [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
+ [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
+ [false, active_notify_list, empty_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm} repo=#{auto_setup_repo}" do
set_user_from_auth :admin
@@ -465,7 +467,7 @@ class UserTest < ActiveSupport::TestCase
verify_user resp_user, email
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
if visible
@@ -499,7 +501,7 @@ class UserTest < ActiveSupport::TestCase
verify_user resp_user, email
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -522,7 +524,7 @@ class UserTest < ActiveSupport::TestCase
verify_user resp_user, email
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
@@ -534,7 +536,7 @@ class UserTest < ActiveSupport::TestCase
assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -550,7 +552,7 @@ class UserTest < ActiveSupport::TestCase
assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
- verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+ verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -625,7 +627,7 @@ class UserTest < ActiveSupport::TestCase
# check user setup
verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
groups(:all_users).uuid, user.uuid,
- "permission", "can_read")
+ "permission", "can_write")
# Check for repository.
if named_repo = (prior_repo or
@@ -800,9 +802,15 @@ class UserTest < ActiveSupport::TestCase
test "empty identity_url saves as null" do
set_user_from_auth :admin
user = users(:active)
- assert user.update_attributes(identity_url: '')
+ assert user.update(identity_url: '')
user.reload
assert_nil user.identity_url
end
+ test "id overflows int32" do
+ uuid = users(:active).uuid
+ ActiveRecord::Base.connection.execute "update users set id=333222111000 where uuid='#{uuid}'"
+ u = User.find_by_uuid(uuid)
+ assert_equal 333222111000, u.id
+ end
end
diff --git a/services/api/test/unit/workflow_test.rb b/services/api/test/unit/workflow_test.rb
index 26cd7f215e..4b3e6095d9 100644
--- a/services/api/test/unit/workflow_test.rb
+++ b/services/api/test/unit/workflow_test.rb
@@ -60,7 +60,7 @@ class WorkflowTest < ActiveSupport::TestCase
definition = "k1:\n v1: x\n v2: y"
assert_raises(ActiveRecord::RecordInvalid) do
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
end
end
@@ -71,7 +71,7 @@ class WorkflowTest < ActiveSupport::TestCase
# when it does not already have custom values for these fields
w = Workflow.find_by_uuid(workflows(:workflow_with_no_name_and_desc).uuid)
definition = "name: test name 1\ndescription: test desc 1\nother: some more"
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_equal "test name 1", w.name
assert_equal "test desc 1", w.description
@@ -79,7 +79,7 @@ class WorkflowTest < ActiveSupport::TestCase
# Workflow name and desc should be set with values from definition yaml
# when it does not already have custom values for these fields
definition = "name: test name 2\ndescription: test desc 2\nother: some more"
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_equal "test name 2", w.name
assert_equal "test desc 2", w.description
@@ -87,7 +87,7 @@ class WorkflowTest < ActiveSupport::TestCase
# Workflow name and desc should be set with values from definition yaml
# even if it means emptying them out
definition = "more: etc"
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_nil w.name
assert_nil w.description
@@ -95,17 +95,17 @@ class WorkflowTest < ActiveSupport::TestCase
# Workflow name and desc set using definition yaml should be cleared
# if definition yaml is cleared
definition = "name: test name 2\ndescription: test desc 2\nother: some more"
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
definition = nil
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_nil w.name
assert_nil w.description
# Workflow name and desc should be set to provided custom values
definition = "name: test name 3\ndescription: test desc 3\nother: some more"
- w.update_attributes!(name: "remains", description: "remains", definition: definition)
+ w.update!(name: "remains", description: "remains", definition: definition)
w.reload
assert_equal "remains", w.name
assert_equal "remains", w.description
@@ -113,7 +113,7 @@ class WorkflowTest < ActiveSupport::TestCase
# Workflow name and desc should retain provided custom values
# and should not be overwritten by values from yaml
definition = "name: test name 4\ndescription: test desc 4\nother: some more"
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_equal "remains", w.name
assert_equal "remains", w.description
@@ -121,7 +121,7 @@ class WorkflowTest < ActiveSupport::TestCase
# Workflow name and desc should retain provided custom values
# and not be affected by the clearing of the definition yaml
definition = nil
- w.update_attributes!(definition: definition)
+ w.update!(definition: definition)
w.reload
assert_equal "remains", w.name
assert_equal "remains", w.description
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.go b/services/crunch-dispatch-local/crunch-dispatch-local.go
index c33c2358ca..e455981891 100644
--- a/services/crunch-dispatch-local/crunch-dispatch-local.go
+++ b/services/crunch-dispatch-local/crunch-dispatch-local.go
@@ -102,7 +102,6 @@ func main() {
if client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
}
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.service b/services/crunch-dispatch-local/crunch-dispatch-local.service
index e3dd113c71..b4fc10f83e 100644
--- a/services/crunch-dispatch-local/crunch-dispatch-local.service
+++ b/services/crunch-dispatch-local/crunch-dispatch-local.service
@@ -5,8 +5,6 @@
Description=Arvados Crunch Dispatcher for LOCAL service
Documentation=https://doc.arvados.org/
After=network.target
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
StartLimitIntervalSec=0
[Service]
@@ -19,8 +17,5 @@ Restart=always
RestartSec=1
LimitNOFILE=1000000
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
[Install]
WantedBy=multi-user.target
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
index c774584d68..5a9ef91c3d 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
@@ -19,6 +19,8 @@ import (
"time"
"git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/lib/dispatchcloud"
"git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -55,10 +57,11 @@ const initialNiceValue int64 = 10000
type Dispatcher struct {
*dispatch.Dispatcher
- logger logrus.FieldLogger
- cluster *arvados.Cluster
- sqCheck *SqueueChecker
- slurm Slurm
+ logger logrus.FieldLogger
+ cluster *arvados.Cluster
+ sqCheck *SqueueChecker
+ slurm Slurm
+ dbConnector ctrlctx.DBConnector
done chan struct{}
err error
@@ -90,6 +93,7 @@ func (disp *Dispatcher) configure() error {
disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
disp.Client.AuthToken = disp.cluster.SystemRootToken
disp.Client.Insecure = disp.cluster.TLS.Insecure
+ disp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.cluster.PostgreSQL}
if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
// Copy real configs into env vars so [a]
@@ -101,7 +105,6 @@ func (disp *Dispatcher) configure() error {
if disp.Client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
os.Setenv(k, v)
}
@@ -138,6 +141,8 @@ func (disp *Dispatcher) setup() {
}
func (disp *Dispatcher) run() error {
+ dblock.Dispatch.Lock(context.Background(), disp.dbConnector.GetDB)
+ defer dblock.Dispatch.Unlock()
defer disp.sqCheck.Stop()
if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
@@ -192,14 +197,16 @@ func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error
if disp.cluster == nil {
// no instance types configured
args = append(args, disp.slurmConstraintArgs(container)...)
- } else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
+ } else if types, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
// ditto
args = append(args, disp.slurmConstraintArgs(container)...)
} else if err != nil {
return nil, err
} else {
- // use instancetype constraint instead of slurm mem/cpu/tmp specs
- args = append(args, "--constraint=instancetype="+it.Name)
+ // use instancetype constraint instead of slurm
+ // mem/cpu/tmp specs (note types[0] is the lowest-cost
+ // suitable instance type)
+ args = append(args, "--constraint=instancetype="+types[0].Name)
}
if len(container.SchedulingParameters.Partitions) > 0 {
diff --git a/services/crunchstat/.gitignore b/services/crunchstat/.gitignore
deleted file mode 100644
index c26270a23a..0000000000
--- a/services/crunchstat/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-crunchstat
diff --git a/services/crunchstat/crunchstat.go b/services/crunchstat/crunchstat.go
deleted file mode 100644
index 6383eae545..0000000000
--- a/services/crunchstat/crunchstat.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io"
- "log"
- "os"
- "os/exec"
- "os/signal"
- "syscall"
- "time"
-
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/lib/crunchstat"
-)
-
-const MaxLogLine = 1 << 14 // Child stderr lines >16KiB will be split
-
-var (
- signalOnDeadPPID int = 15
- ppidCheckInterval = time.Second
- version = "dev"
-)
-
-func main() {
- reporter := crunchstat.Reporter{
- Logger: log.New(os.Stderr, "crunchstat: ", 0),
- }
-
- flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
- flags.StringVar(&reporter.CgroupRoot, "cgroup-root", "", "Root of cgroup tree")
- flags.StringVar(&reporter.CgroupParent, "cgroup-parent", "", "Name of container parent under cgroup")
- flags.StringVar(&reporter.CIDFile, "cgroup-cid", "", "Path to container id file")
- flags.IntVar(&signalOnDeadPPID, "signal-on-dead-ppid", signalOnDeadPPID, "Signal to send child if crunchstat's parent process disappears (0 to disable)")
- flags.DurationVar(&ppidCheckInterval, "ppid-check-interval", ppidCheckInterval, "Time between checks for parent process disappearance")
- pollMsec := flags.Int64("poll", 1000, "Reporting interval, in milliseconds")
- getVersion := flags.Bool("version", false, "Print version information and exit.")
-
- if ok, code := cmd.ParseFlags(flags, os.Args[0], os.Args[1:], "program [args ...]", os.Stderr); !ok {
- os.Exit(code)
- } else if *getVersion {
- fmt.Printf("crunchstat %s\n", version)
- return
- } else if flags.NArg() == 0 {
- fmt.Fprintf(os.Stderr, "missing required argument: program (try -help)\n")
- os.Exit(2)
- }
-
- reporter.Logger.Printf("crunchstat %s started", version)
-
- if reporter.CgroupRoot == "" {
- reporter.Logger.Fatal("error: must provide -cgroup-root")
- } else if signalOnDeadPPID < 0 {
- reporter.Logger.Fatalf("-signal-on-dead-ppid=%d is invalid (use a positive signal number, or 0 to disable)", signalOnDeadPPID)
- }
- reporter.PollPeriod = time.Duration(*pollMsec) * time.Millisecond
-
- reporter.Start()
- err := runCommand(flags.Args(), reporter.Logger)
- reporter.Stop()
-
- if err, ok := err.(*exec.ExitError); ok {
- // The program has exited with an exit code != 0
-
- // This works on both Unix and Windows. Although
- // package syscall is generally platform dependent,
- // WaitStatus is defined for both Unix and Windows and
- // in both cases has an ExitStatus() method with the
- // same signature.
- if status, ok := err.Sys().(syscall.WaitStatus); ok {
- os.Exit(status.ExitStatus())
- } else {
- reporter.Logger.Fatalln("ExitError without WaitStatus:", err)
- }
- } else if err != nil {
- reporter.Logger.Fatalln("error in cmd.Wait:", err)
- }
-}
-
-func runCommand(argv []string, logger *log.Logger) error {
- cmd := exec.Command(argv[0], argv[1:]...)
-
- logger.Println("Running", argv)
-
- // Child process will use our stdin and stdout pipes
- // (we close our copies below)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
-
- // Forward SIGINT and SIGTERM to child process
- sigChan := make(chan os.Signal, 1)
- go func(sig <-chan os.Signal) {
- catch := <-sig
- if cmd.Process != nil {
- cmd.Process.Signal(catch)
- }
- logger.Println("notice: caught signal:", catch)
- }(sigChan)
- signal.Notify(sigChan, syscall.SIGTERM)
- signal.Notify(sigChan, syscall.SIGINT)
-
- // Kill our child proc if our parent process disappears
- if signalOnDeadPPID != 0 {
- go sendSignalOnDeadPPID(ppidCheckInterval, signalOnDeadPPID, os.Getppid(), cmd, logger)
- }
-
- // Funnel stderr through our channel
- stderrPipe, err := cmd.StderrPipe()
- if err != nil {
- logger.Fatalln("error in StderrPipe:", err)
- }
-
- // Run subprocess
- if err := cmd.Start(); err != nil {
- logger.Fatalln("error in cmd.Start:", err)
- }
-
- // Close stdin/stdout in this (parent) process
- os.Stdin.Close()
- os.Stdout.Close()
-
- copyPipeToChildLog(stderrPipe, log.New(os.Stderr, "", 0))
-
- return cmd.Wait()
-}
-
-func sendSignalOnDeadPPID(intvl time.Duration, signum, ppidOrig int, cmd *exec.Cmd, logger *log.Logger) {
- ticker := time.NewTicker(intvl)
- for range ticker.C {
- ppid := os.Getppid()
- if ppid == ppidOrig {
- continue
- }
- if cmd.Process == nil {
- // Child process isn't running yet
- continue
- }
- logger.Printf("notice: crunchstat ppid changed from %d to %d -- killing child pid %d with signal %d", ppidOrig, ppid, cmd.Process.Pid, signum)
- err := cmd.Process.Signal(syscall.Signal(signum))
- if err != nil {
- logger.Printf("error: sending signal: %s", err)
- continue
- }
- ticker.Stop()
- break
- }
-}
-
-func copyPipeToChildLog(in io.ReadCloser, logger *log.Logger) {
- reader := bufio.NewReaderSize(in, MaxLogLine)
- var prefix string
- for {
- line, isPrefix, err := reader.ReadLine()
- if err == io.EOF {
- break
- } else if err != nil {
- logger.Fatal("error reading child stderr:", err)
- }
- var suffix string
- if isPrefix {
- suffix = "[...]"
- }
- logger.Print(prefix, string(line), suffix)
- // Set up prefix for following line
- if isPrefix {
- prefix = "[...]"
- } else {
- prefix = ""
- }
- }
- in.Close()
-}
diff --git a/services/crunchstat/crunchstat_test.go b/services/crunchstat/crunchstat_test.go
deleted file mode 100644
index eb02395f00..0000000000
--- a/services/crunchstat/crunchstat_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math/rand"
- "os"
- "os/exec"
- "sync"
- "syscall"
- "testing"
- "time"
-)
-
-// Test that CopyPipeToChildLog works even on lines longer than
-// bufio.MaxScanTokenSize.
-func TestCopyPipeToChildLogLongLines(t *testing.T) {
- logger, logBuf := bufLogger()
-
- pipeIn, pipeOut := io.Pipe()
- copied := make(chan bool)
- go func() {
- copyPipeToChildLog(pipeIn, logger)
- close(copied)
- }()
-
- sentBytes := make([]byte, bufio.MaxScanTokenSize+MaxLogLine+(1<<22))
- go func() {
- pipeOut.Write([]byte("before\n"))
-
- for i := range sentBytes {
- // Some bytes that aren't newlines:
- sentBytes[i] = byte((rand.Int() & 0xff) | 0x80)
- }
- sentBytes[len(sentBytes)-1] = '\n'
- pipeOut.Write(sentBytes)
-
- pipeOut.Write([]byte("after"))
- pipeOut.Close()
- }()
-
- if before, err := logBuf.ReadBytes('\n'); err != nil || string(before) != "before\n" {
- t.Fatalf("\"before\n\" not received (got \"%s\", %s)", before, err)
- }
-
- var receivedBytes []byte
- done := false
- for !done {
- line, err := logBuf.ReadBytes('\n')
- if err != nil {
- t.Fatal(err)
- }
- if len(line) >= 5 && string(line[0:5]) == "[...]" {
- if receivedBytes == nil {
- t.Fatal("Beginning of line reported as continuation")
- }
- line = line[5:]
- }
- if len(line) >= 6 && string(line[len(line)-6:]) == "[...]\n" {
- line = line[:len(line)-6]
- } else {
- done = true
- }
- receivedBytes = append(receivedBytes, line...)
- }
- if bytes.Compare(receivedBytes, sentBytes) != 0 {
- t.Fatalf("sent %d bytes, got %d different bytes", len(sentBytes), len(receivedBytes))
- }
-
- if after, err := logBuf.ReadBytes('\n'); err != nil || string(after) != "after\n" {
- t.Fatalf("\"after\n\" not received (got \"%s\", %s)", after, err)
- }
-
- select {
- case <-time.After(time.Second):
- t.Fatal("Timeout")
- case <-copied:
- // Done.
- }
-}
-
-func bufLogger() (*log.Logger, *bufio.Reader) {
- r, w := io.Pipe()
- logger := log.New(w, "", 0)
- return logger, bufio.NewReader(r)
-}
-
-func TestSignalOnDeadPPID(t *testing.T) {
- if !testDeadParent(t, 0) {
- t.Fatal("child should still be alive after parent dies")
- }
- if testDeadParent(t, 15) {
- t.Fatal("child should have been killed when parent died")
- }
-}
-
-// testDeadParent returns true if crunchstat's child proc is still
-// alive after its parent dies.
-func testDeadParent(t *testing.T, signum int) bool {
- var err error
- var bin, childlockfile, parentlockfile *os.File
- for _, f := range []**os.File{&bin, &childlockfile, &parentlockfile} {
- *f, err = ioutil.TempFile("", "crunchstat_")
- if err != nil {
- t.Fatal(err)
- }
- defer (*f).Close()
- defer os.Remove((*f).Name())
- }
-
- bin.Close()
- err = exec.Command("go", "build", "-o", bin.Name()).Run()
- if err != nil {
- t.Fatal(err)
- }
-
- err = syscall.Flock(int(parentlockfile.Fd()), syscall.LOCK_EX)
- if err != nil {
- t.Fatal(err)
- }
-
- cmd := exec.Command("bash", "-c", `
-set -e
-"$BINFILE" -cgroup-root=/none -ppid-check-interval=10ms -signal-on-dead-ppid="$SIGNUM" bash -c '
- set -e
- unlock() {
- flock --unlock "$CHILDLOCKFD"
- kill %1
- }
- trap unlock TERM
- flock --exclusive "$CHILDLOCKFD"
- echo -n "$$" > "$CHILDLOCKFILE"
- flock --unlock "$PARENTLOCKFD"
- sleep 20 /dev/null 2>/dev/null &
- wait %1
- unlock
-' &
-
-# wait for inner bash to start, to ensure $BINFILE has seen this bash proc as its initial PPID
-flock --exclusive "$PARENTLOCKFILE" true
-`)
- cmd.Env = append(os.Environ(),
- "SIGNUM="+fmt.Sprintf("%d", signum),
- "PARENTLOCKFD=3",
- "PARENTLOCKFILE="+parentlockfile.Name(),
- "CHILDLOCKFD=4",
- "CHILDLOCKFILE="+childlockfile.Name(),
- "BINFILE="+bin.Name())
- cmd.ExtraFiles = []*os.File{parentlockfile, childlockfile}
- stderr, err := cmd.StderrPipe()
- if err != nil {
- t.Fatal(err)
- }
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- t.Fatal(err)
- }
- cmd.Start()
- defer cmd.Wait()
-
- var wg sync.WaitGroup
- wg.Add(2)
- defer wg.Wait()
- for _, rdr := range []io.ReadCloser{stderr, stdout} {
- go func(rdr io.ReadCloser) {
- defer wg.Done()
- buf := make([]byte, 1024)
- for {
- n, err := rdr.Read(buf)
- if n > 0 {
- t.Logf("%s", buf[:n])
- }
- if err != nil {
- return
- }
- }
- }(rdr)
- }
-
- // Wait until inner bash process releases parentlockfile
- // (which means it has locked childlockfile and written its
- // PID)
- err = exec.Command("flock", "--exclusive", parentlockfile.Name(), "true").Run()
- if err != nil {
- t.Fatal(err)
- }
-
- childDone := make(chan bool)
- go func() {
- // Notify the main thread when the inner bash process
- // releases its lock on childlockfile (which means
- // either its sleep process ended or it received a
- // TERM signal).
- t0 := time.Now()
- err = exec.Command("flock", "--exclusive", childlockfile.Name(), "true").Run()
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("child done after %s", time.Since(t0))
- close(childDone)
- }()
-
- select {
- case <-time.After(500 * time.Millisecond):
- // Inner bash process is still alive after the timeout
- // period. Kill it now, so our stdout and stderr pipes
- // can finish and we don't leave a mess of child procs
- // behind.
- buf, err := ioutil.ReadFile(childlockfile.Name())
- if err != nil {
- t.Fatal(err)
- }
- var childPID int
- _, err = fmt.Sscanf(string(buf), "%d", &childPID)
- if err != nil {
- t.Fatal(err)
- }
- child, err := os.FindProcess(childPID)
- if err != nil {
- t.Fatal(err)
- }
- child.Signal(syscall.Signal(15))
- return true
-
- case <-childDone:
- // Inner bash process ended soon after its grandparent
- // ended.
- return false
- }
-}
diff --git a/services/dockercleaner/arvados-docker-cleaner.service b/services/dockercleaner/arvados-docker-cleaner.service
index 2aab42b2a3..819c920ff2 100644
--- a/services/dockercleaner/arvados-docker-cleaner.service
+++ b/services/dockercleaner/arvados-docker-cleaner.service
@@ -6,8 +6,6 @@
Description=Arvados Docker Image Cleaner
Documentation=https://doc.arvados.org/
After=network.target
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
StartLimitIntervalSec=0
[Service]
@@ -15,14 +13,7 @@ Type=simple
Restart=always
RestartSec=10s
RestartPreventExitStatus=2
-#
-# This unwieldy ExecStart command detects at runtime whether
-# arvados-docker-cleaner is installed with the Python 3.3 Software
-# Collection, and if so, invokes it with the "scl" wrapper.
-ExecStart=/bin/sh -c 'if [ -e /opt/rh/rh-python36/root/bin/arvados-docker-cleaner ]; then exec scl enable rh-python36 arvados-docker-cleaner; else exec arvados-docker-cleaner; fi'
-
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
+ExecStart=/usr/bin/arvados-docker-cleaner
[Install]
WantedBy=multi-user.target
diff --git a/services/dockercleaner/arvados_docker/cleaner.py b/services/dockercleaner/arvados_docker/cleaner.py
index 2a0e8b9108..df624698ba 100755
--- a/services/dockercleaner/arvados_docker/cleaner.py
+++ b/services/dockercleaner/arvados_docker/cleaner.py
@@ -362,7 +362,7 @@ def main(arguments=sys.argv[1:]):
config = load_config(arguments)
configure_logging(config)
try:
- run(config, docker.Client(version='1.14'))
+ run(config, docker.APIClient(version='1.35'))
except KeyboardInterrupt:
sys.exit(1)
diff --git a/services/dockercleaner/arvados_version.py b/services/dockercleaner/arvados_version.py
index 38e6f564e7..794b6afe42 100644
--- a/services/dockercleaner/arvados_version.py
+++ b/services/dockercleaner/arvados_version.py
@@ -1,57 +1,145 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+#
+# This file runs in one of three modes:
+#
+# 1. If the ARVADOS_BUILDING_VERSION environment variable is set, it writes
+# _version.py and generates dependencies based on that value.
+# 2. If running from an arvados Git checkout, it writes _version.py
+# and generates dependencies from Git.
+# 3. Otherwise, we expect this is source previously generated from Git, and
+# it reads _version.py and generates dependencies from it.
-import subprocess
-import time
import os
import re
+import runpy
+import subprocess
import sys
-SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
-VERSION_PATHS = {
- SETUP_DIR,
- os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
- }
+from pathlib import Path
+
+# These maps explain the relationships between different Python modules in
+# the arvados repository. We use these to help generate setup.py.
+PACKAGE_DEPENDENCY_MAP = {
+ 'arvados-cwl-runner': ['arvados-python-client', 'crunchstat_summary'],
+ 'arvados-user-activity': ['arvados-python-client'],
+ 'arvados_fuse': ['arvados-python-client'],
+ 'crunchstat_summary': ['arvados-python-client'],
+}
+PACKAGE_MODULE_MAP = {
+ 'arvados-cwl-runner': 'arvados_cwl',
+ 'arvados-docker-cleaner': 'arvados_docker',
+ 'arvados-python-client': 'arvados',
+ 'arvados-user-activity': 'arvados_user_activity',
+ 'arvados_fuse': 'arvados_fuse',
+ 'crunchstat_summary': 'crunchstat_summary',
+}
+PACKAGE_SRCPATH_MAP = {
+ 'arvados-cwl-runner': Path('sdk', 'cwl'),
+ 'arvados-docker-cleaner': Path('services', 'dockercleaner'),
+ 'arvados-python-client': Path('sdk', 'python'),
+ 'arvados-user-activity': Path('tools', 'user-activity'),
+ 'arvados_fuse': Path('services', 'fuse'),
+ 'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+}
+
+ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
+SETUP_DIR = Path(__file__).absolute().parent
+try:
+ REPO_PATH = Path(subprocess.check_output(
+ ['git', '-C', str(SETUP_DIR), 'rev-parse', '--show-toplevel'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).rstrip('\n'))
+except (subprocess.CalledProcessError, OSError):
+ REPO_PATH = None
+else:
+ # Verify this is the arvados monorepo
+ if all((REPO_PATH / path).exists() for path in PACKAGE_SRCPATH_MAP.values()):
+ PACKAGE_NAME, = (
+ pkg_name for pkg_name, path in PACKAGE_SRCPATH_MAP.items()
+ if (REPO_PATH / path) == SETUP_DIR
+ )
+ MODULE_NAME = PACKAGE_MODULE_MAP[PACKAGE_NAME]
+ VERSION_SCRIPT_PATH = Path(REPO_PATH, 'build', 'version-at-commit.sh')
+ else:
+ REPO_PATH = None
+if REPO_PATH is None:
+ (PACKAGE_NAME, MODULE_NAME), = (
+ (pkg_name, mod_name)
+ for pkg_name, mod_name in PACKAGE_MODULE_MAP.items()
+ if (SETUP_DIR / mod_name).is_dir()
+ )
+
+def short_tests_only(arglist=sys.argv):
+ try:
+ arglist.remove('--short-tests-only')
+ except ValueError:
+ return False
+ else:
+ return True
+
+def git_log_output(path, *args):
+ return subprocess.check_output(
+ ['git', '-C', str(REPO_PATH),
+ 'log', '--first-parent', '--max-count=1',
+ *args, str(path)],
+ text=True,
+ ).rstrip('\n')
def choose_version_from():
- ts = {}
- for path in VERSION_PATHS:
- ts[subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', path]).strip()] = path
-
- sorted_ts = sorted(ts.items())
- getver = sorted_ts[-1][1]
- print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ ver_paths = [SETUP_DIR, VERSION_SCRIPT_PATH, *(
+ PACKAGE_SRCPATH_MAP[pkg]
+ for pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ())
+ )]
+ getver = max(ver_paths, key=lambda path: git_log_output(path, '--format=format:%ct'))
+ print(f"Using {getver} for version number calculation of {SETUP_DIR}", file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
- myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
- '--format=%H', curdir]).strip()
- myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
- return myversion
+ myhash = git_log_output(curdir, '--format=%H')
+ return subprocess.check_output(
+ [str(VERSION_SCRIPT_PATH), myhash],
+ text=True,
+ ).rstrip('\n')
def save_version(setup_dir, module, v):
- v = v.replace("~dev", ".dev").replace("~rc", "rc")
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ with Path(setup_dir, module, '_version.py').open('w') as fp:
+ print(f"__version__ = {v!r}", file=fp)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
-
-def get_version(setup_dir, module):
- env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+ file_vars = runpy.run_path(Path(setup_dir, module, '_version.py'))
+ return file_vars['__version__']
- if env_version:
- save_version(setup_dir, module, env_version)
+def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
+ if ENV_VERSION:
+ version = ENV_VERSION
+ elif REPO_PATH is None:
+ return read_version(setup_dir, module)
else:
- try:
- save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError) as err:
- print("ERROR: {0}".format(err), file=sys.stderr)
- pass
+ version = git_version_at_commit()
+ version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ save_version(setup_dir, module, version)
+ return version
+
+def iter_dependencies(version=None):
+ if version is None:
+ version = get_version()
+ # A packaged development release should be installed with other
+ # development packages built from the same source, but those
+ # dependencies may have earlier "dev" versions (read: less recent
+ # Git commit timestamps). This compatible version dependency
+ # expresses that as closely as possible. Allowing versions
+ # compatible with .dev0 allows any development release.
+ # Regular expression borrowed partially from
+ #
+ dep_ver, match_count = re.subn(r'\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)
+ dep_op = '~=' if match_count else '=='
+ for dep_pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ()):
+ yield f'{dep_pkg}{dep_op}{dep_ver}'
- return read_version(setup_dir, module)
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+ print(get_version())
diff --git a/services/dockercleaner/setup.py b/services/dockercleaner/setup.py
index 3bafe9ba86..9c69879b45 100644
--- a/services/dockercleaner/setup.py
+++ b/services/dockercleaner/setup.py
@@ -10,16 +10,10 @@ import re
from setuptools import setup, find_packages
-SETUP_DIR = os.path.dirname(__file__) or '.'
-README = os.path.join(SETUP_DIR, 'README.rst')
-
import arvados_version
-version = arvados_version.get_version(SETUP_DIR, "arvados_docker")
-
-short_tests_only = False
-if '--short-tests-only' in sys.argv:
- short_tests_only = True
- sys.argv.remove('--short-tests-only')
+version = arvados_version.get_version()
+short_tests_only = arvados_version.short_tests_only()
+README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
setup(name="arvados-docker-cleaner",
version=version,
@@ -37,13 +31,11 @@ setup(name="arvados-docker-cleaner",
('share/doc/arvados-docker-cleaner', ['agpl-3.0.txt', 'arvados-docker-cleaner.service']),
],
install_requires=[
- 'docker-py==1.7.2',
+ *arvados_version.iter_dependencies(version),
+ 'docker>=6.1.0',
'setuptools',
],
- tests_require=[
- 'pbr<1.7.0',
- 'mock',
- ],
+ python_requires="~=3.8",
test_suite='tests',
zip_safe=False
)
diff --git a/services/dockercleaner/tests/test_cleaner.py b/services/dockercleaner/tests/test_cleaner.py
index 7580b0128a..cd03538fcd 100644
--- a/services/dockercleaner/tests/test_cleaner.py
+++ b/services/dockercleaner/tests/test_cleaner.py
@@ -13,7 +13,7 @@ import time
import unittest
import docker
-import mock
+from unittest import mock
from arvados_docker import cleaner
@@ -394,7 +394,7 @@ class RunTestCase(unittest.TestCase):
self.assertEqual(event_kwargs[0]['until'], event_kwargs[1]['since'])
-@mock.patch('docker.Client', name='docker_client')
+@mock.patch('docker.APIClient', name='docker_client')
@mock.patch('arvados_docker.cleaner.run', name='cleaner_run')
class MainTestCase(unittest.TestCase):
@@ -404,11 +404,9 @@ class MainTestCase(unittest.TestCase):
cf.flush()
cleaner.main(['--config', cf.name])
self.assertEqual(1, docker_client.call_count)
- # 1.14 is the first version that's well defined, going back to
- # Docker 1.2, and still supported up to at least Docker 1.9.
- # See
- # .
- self.assertEqual('1.14',
+ # We are standardized on Docker API version 1.35.
+ # See DockerAPIVersion in lib/crunchrun/docker.go.
+ self.assertEqual('1.35',
docker_client.call_args[1].get('version'))
self.assertEqual(1, run_mock.call_count)
self.assertIs(run_mock.call_args[0][1], docker_client())
diff --git a/services/fuse/README.rst b/services/fuse/README.rst
index 0416d3dbd2..12c6ae6ca1 100644
--- a/services/fuse/README.rst
+++ b/services/fuse/README.rst
@@ -21,17 +21,29 @@ Installation
Installing under your user account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This method lets you install the package without root access.
-However, other users on the same system won't be able to use it.
+This method lets you install the package without root access. However,
+other users on the same system will need to reconfigure their shell in order
+to be able to use it. Run the following to install the package in an
+environment at ``~/arvclients``::
-1. Run ``pip install --user arvados_fuse``.
+ python3 -m venv ~/arvclients
+ ~/arvclients/bin/pip install arvados_fuse
-2. In your shell configuration, make sure you add ``$HOME/.local/bin``
- to your PATH environment variable. For example, you could add the
- command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+Command line tools will be installed under ``~/arvclients/bin``. You can
+test one by running::
-3. Reload your shell configuration. For example, bash users could run
- ``source ~/.bashrc``.
+ ~/arvclients/bin/arv-mount --version
+
+You can run these tools by specifying the full path every time, or you can
+add the directory to your shell's search path by running::
+
+ export PATH="$PATH:$HOME/arvclients/bin"
+
+You can make this search path change permanent by adding this command to
+your shell's configuration, for example ``~/.bashrc`` if you're using bash.
+You can test the change by running::
+
+ arv-mount --version
Installing on Debian systems
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -66,5 +78,5 @@ $ apt-get install python-dev pkg-config libfuse-dev libattr1-dev
This package is one part of the Arvados source package, and it has
integration tests to check interoperability with other Arvados
components. Our `hacking guide
-`_
+`_
describes how to set up a development environment and run tests.
diff --git a/services/fuse/arvados_fuse/__init__.py b/services/fuse/arvados_fuse/__init__.py
index 14dd7f3f85..d827aefab7 100644
--- a/services/fuse/arvados_fuse/__init__.py
+++ b/services/fuse/arvados_fuse/__init__.py
@@ -47,16 +47,15 @@ The general FUSE operation flow is as follows:
The FUSE driver supports the Arvados event bus. When an event is received for
an object that is live in the inode cache, that object is immediately updated.
+Implementation note: in the code, the terms 'object', 'entry' and
+'inode' are used somewhat interchangeably, but generally mean an
+arvados_fuse.File or arvados_fuse.Directory object which has numeric
+inode assigned to it and appears in the Inodes._entries dictionary.
+
"""
from __future__ import absolute_import
from __future__ import division
-from future.utils import viewitems
-from future.utils import native
-from future.utils import listvalues
-from future.utils import listitems
-from future import standard_library
-standard_library.install_aliases()
from builtins import next
from builtins import str
from builtins import object
@@ -76,22 +75,11 @@ import functools
import arvados.keep
from prometheus_client import Summary
import queue
-
-# Default _notify_queue has a limit of 1000 items, but it really needs to be
-# unlimited to avoid deadlocks, see https://arvados.org/issues/3198#note-43 for
-# details.
-
-if hasattr(llfuse, 'capi'):
- # llfuse < 0.42
- llfuse.capi._notify_queue = queue.Queue()
-else:
- # llfuse >= 0.42
- llfuse._notify_queue = queue.Queue()
-
-LLFUSE_VERSION_0 = llfuse.__version__.startswith('0')
+from dataclasses import dataclass
+import typing
from .fusedir import Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
-from .fusefile import StringFile, FuseArvadosFile
+from .fusefile import File, StringFile, FuseArvadosFile
_logger = logging.getLogger('arvados.arvados_fuse')
@@ -128,28 +116,47 @@ class FileHandle(Handle):
class DirectoryHandle(Handle):
"""Connects a numeric file handle to a Directory object that has
- been opened by the client."""
+ been opened by the client.
+
+ DirectoryHandle is used by opendir() and readdir() to get
+ directory listings. Entries returned by readdir() don't increment
+    the lookup count (kernel references), so increment our internal
+    "use count" to avoid having an item removed mid-read.
+
+ """
def __init__(self, fh, dirobj, entries):
super(DirectoryHandle, self).__init__(fh, dirobj)
self.entries = entries
+ for ent in self.entries:
+ ent[1].inc_use()
+
+ def release(self):
+ for ent in self.entries:
+ ent[1].dec_use()
+ super(DirectoryHandle, self).release()
+
class InodeCache(object):
"""Records the memory footprint of objects and when they are last used.
- When the cache limit is exceeded, the least recently used objects are
- cleared. Clearing the object means discarding its contents to release
- memory. The next time the object is accessed, it must be re-fetched from
- the server. Note that the inode cache limit is a soft limit; the cache
- limit may be exceeded if necessary to load very large objects, it may also
- be exceeded if open file handles prevent objects from being cleared.
+ When the cache limit is exceeded, the least recently used objects
+ are cleared. Clearing the object means discarding its contents to
+ release memory. The next time the object is accessed, it must be
+ re-fetched from the server. Note that the inode cache limit is a
+ soft limit; the cache limit may be exceeded if necessary to load
+ very large projects or collections, it may also be exceeded if an
+ inode can't be safely discarded based on kernel lookups
+ (has_ref()) or internal use count (in_use()).
"""
def __init__(self, cap, min_entries=4):
- self._entries = collections.OrderedDict()
- self._by_uuid = {}
+ # Standard dictionaries are ordered, but OrderedDict is still better here, see
+ # https://docs.python.org/3.11/library/collections.html#ordereddict-objects
+ # specifically we use move_to_end() which standard dicts don't have.
+ self._cache_entries = collections.OrderedDict()
self.cap = cap
self._total = 0
self.min_entries = min_entries
@@ -157,104 +164,148 @@ class InodeCache(object):
def total(self):
return self._total
- def _remove(self, obj, clear):
- if clear:
- # Kernel behavior seems to be that if a file is
- # referenced, its parents remain referenced too. This
- # means has_ref() exits early when a collection is not
- # candidate for eviction.
- #
- # By contrast, in_use() doesn't increment references on
- # parents, so it requires a full tree walk to determine if
- # a collection is a candidate for eviction. This takes
- # .07s for 240000 files, which becomes a major drag when
- # cap_cache is being called several times a second and
- # there are multiple non-evictable collections in the
- # cache.
- #
- # So it is important for performance that we do the
- # has_ref() check first.
-
- if obj.has_ref(True):
- _logger.debug("InodeCache cannot clear inode %i, still referenced", obj.inode)
- return
+ def evict_candidates(self):
+ """Yield entries that are candidates to be evicted
+ and stop when the cache total has shrunk sufficiently.
- if obj.in_use():
- _logger.debug("InodeCache cannot clear inode %i, in use", obj.inode)
- return
+ Implements a LRU cache, when an item is added or touch()ed it
+ goes to the back of the OrderedDict, so items in the front are
+ oldest. The Inodes._remove() function determines if the entry
+ can actually be removed safely.
- obj.kernel_invalidate()
- _logger.debug("InodeCache sent kernel invalidate inode %i", obj.inode)
- obj.clear()
+ """
- # The llfuse lock is released in del_entry(), which is called by
- # Directory.clear(). While the llfuse lock is released, it can happen
- # that a reentrant call removes this entry before this call gets to it.
- # Ensure that the entry is still valid before trying to remove it.
- if obj.inode not in self._entries:
+ if self._total <= self.cap:
return
- self._total -= obj.cache_size
- del self._entries[obj.inode]
- if obj.cache_uuid:
- self._by_uuid[obj.cache_uuid].remove(obj)
- if not self._by_uuid[obj.cache_uuid]:
- del self._by_uuid[obj.cache_uuid]
- obj.cache_uuid = None
- if clear:
- _logger.debug("InodeCache cleared inode %i total now %i", obj.inode, self._total)
+ _logger.debug("InodeCache evict_candidates total %i cap %i entries %i", self._total, self.cap, len(self._cache_entries))
- def cap_cache(self):
- if self._total > self.cap:
- for ent in listvalues(self._entries):
- if self._total < self.cap or len(self._entries) < self.min_entries:
- break
- self._remove(ent, True)
-
- def manage(self, obj):
- if obj.persisted():
- obj.cache_size = obj.objsize()
- self._entries[obj.inode] = obj
- obj.cache_uuid = obj.uuid()
- if obj.cache_uuid:
- if obj.cache_uuid not in self._by_uuid:
- self._by_uuid[obj.cache_uuid] = [obj]
- else:
- if obj not in self._by_uuid[obj.cache_uuid]:
- self._by_uuid[obj.cache_uuid].append(obj)
- self._total += obj.objsize()
- _logger.debug("InodeCache touched inode %i (size %i) (uuid %s) total now %i (%i entries)",
- obj.inode, obj.objsize(), obj.cache_uuid, self._total, len(self._entries))
- self.cap_cache()
+ # Copy this into a deque for two reasons:
+ #
+ # 1. _cache_entries is modified by unmanage() which is called
+ # by _remove
+ #
+ # 2. popping off the front means the reference goes away
+    #    immediately instead of sticking around for the lifetime of
+ # "values"
+ values = collections.deque(self._cache_entries.values())
- def touch(self, obj):
- if obj.persisted():
- if obj.inode in self._entries:
- self._remove(obj, False)
- self.manage(obj)
+ while values:
+ if self._total < self.cap or len(self._cache_entries) < self.min_entries:
+ break
+ yield values.popleft()
- def unmanage(self, obj):
- if obj.persisted() and obj.inode in self._entries:
- self._remove(obj, True)
+ def unmanage(self, entry):
+ """Stop managing an object in the cache.
- def find_by_uuid(self, uuid):
- return self._by_uuid.get(uuid, [])
+ This happens when an object is being removed from the inode
+ entries table.
+
+ """
+
+ if entry.inode not in self._cache_entries:
+ return
+
+ # manage cache size running sum
+ self._total -= entry.cache_size
+ entry.cache_size = 0
+
+ # Now forget about it
+ del self._cache_entries[entry.inode]
+
+ def update_cache_size(self, obj):
+ """Update the cache total in response to the footprint of an
+ object changing (usually because it has been loaded or
+ cleared).
+
+    Adds entries to or removes them from the cache list based on
+    the object cache size.
+
+ """
+
+ if not obj.persisted():
+ return
+
+ if obj.inode in self._cache_entries:
+ self._total -= obj.cache_size
+
+ obj.cache_size = obj.objsize()
+
+ if obj.cache_size > 0 or obj.parent_inode is None:
+ self._total += obj.cache_size
+ self._cache_entries[obj.inode] = obj
+ elif obj.cache_size == 0 and obj.inode in self._cache_entries:
+ del self._cache_entries[obj.inode]
+
+ def touch(self, obj):
+ """Indicate an object was used recently, making it low
+ priority to be removed from the cache.
+
+ """
+ if obj.inode in self._cache_entries:
+ self._cache_entries.move_to_end(obj.inode)
+ return True
+ return False
def clear(self):
- self._entries.clear()
- self._by_uuid.clear()
+ self._cache_entries.clear()
self._total = 0
+@dataclass
+class RemoveInode:
+ entry: typing.Union[Directory, File]
+ def inode_op(self, inodes, locked_ops):
+ if locked_ops is None:
+ inodes._remove(self.entry)
+ return True
+ else:
+ locked_ops.append(self)
+ return False
+
+@dataclass
+class InvalidateInode:
+ inode: int
+ def inode_op(self, inodes, locked_ops):
+ llfuse.invalidate_inode(self.inode)
+ return True
+
+@dataclass
+class InvalidateEntry:
+ inode: int
+ name: str
+ def inode_op(self, inodes, locked_ops):
+ llfuse.invalidate_entry(self.inode, self.name)
+ return True
+
+@dataclass
+class EvictCandidates:
+ def inode_op(self, inodes, locked_ops):
+ return True
+
+
class Inodes(object):
- """Manage the set of inodes. This is the mapping from a numeric id
- to a concrete File or Directory object"""
+ """Manage the set of inodes.
+
+ This is the mapping from a numeric id to a concrete File or
+ Directory object
- def __init__(self, inode_cache, encoding="utf-8"):
+ """
+
+ def __init__(self, inode_cache, encoding="utf-8", fsns=None, shutdown_started=None):
self._entries = {}
self._counter = itertools.count(llfuse.ROOT_INODE)
self.inode_cache = inode_cache
self.encoding = encoding
- self.deferred_invalidations = []
+ self._fsns = fsns
+ self._shutdown_started = shutdown_started or threading.Event()
+
+ self._inode_remove_queue = queue.Queue()
+ self._inode_remove_thread = threading.Thread(None, self._inode_remove)
+ self._inode_remove_thread.daemon = True
+ self._inode_remove_thread.start()
+
+ self.cap_cache_event = threading.Event()
+ self._by_uuid = collections.defaultdict(list)
def __getitem__(self, item):
return self._entries[item]
@@ -266,50 +317,196 @@ class Inodes(object):
return iter(self._entries.keys())
def items(self):
- return viewitems(self._entries.items())
+ return self._entries.items()
def __contains__(self, k):
return k in self._entries
def touch(self, entry):
+ """Update the access time, adjust the cache position, and
+ notify the _inode_remove thread to recheck the cache.
+
+ """
+
entry._atime = time.time()
- self.inode_cache.touch(entry)
+ if self.inode_cache.touch(entry):
+ self.cap_cache()
+
+ def cap_cache(self):
+ """Notify the _inode_remove thread to recheck the cache."""
+ if not self.cap_cache_event.is_set():
+ self.cap_cache_event.set()
+ self._inode_remove_queue.put(EvictCandidates())
+
+ def update_uuid(self, entry):
+ """Update the Arvados uuid associated with an inode entry.
+
+ This is used to look up inodes that need to be invalidated
+ when a websocket event indicates the object has changed on the
+ API server.
+
+ """
+ if entry.cache_uuid and entry in self._by_uuid[entry.cache_uuid]:
+ self._by_uuid[entry.cache_uuid].remove(entry)
+
+ entry.cache_uuid = entry.uuid()
+ if entry.cache_uuid and entry not in self._by_uuid[entry.cache_uuid]:
+ self._by_uuid[entry.cache_uuid].append(entry)
+
+ if not self._by_uuid[entry.cache_uuid]:
+ del self._by_uuid[entry.cache_uuid]
def add_entry(self, entry):
+ """Assign a numeric inode to a new entry."""
+
entry.inode = next(self._counter)
if entry.inode == llfuse.ROOT_INODE:
entry.inc_ref()
self._entries[entry.inode] = entry
- self.inode_cache.manage(entry)
+
+ self.update_uuid(entry)
+ self.inode_cache.update_cache_size(entry)
+ self.cap_cache()
return entry
def del_entry(self, entry):
- if entry.ref_count == 0:
- self.inode_cache.unmanage(entry)
- del self._entries[entry.inode]
+ """Remove entry from the inode table.
+
+ Indicate this inode entry is pending deletion by setting
+ parent_inode to None. Notify the _inode_remove thread to try
+ and remove it.
+
+ """
+
+ entry.parent_inode = None
+ self._inode_remove_queue.put(RemoveInode(entry))
+ _logger.debug("del_entry on inode %i with refcount %i", entry.inode, entry.ref_count)
+
+ def _inode_remove(self):
+ """Background thread to handle tasks related to invalidating
+ inodes in the kernel, and removing objects from the inodes
+ table entirely.
+
+ """
+
+ locked_ops = collections.deque()
+ while True:
+ blocking_get = True
+ while True:
+ try:
+ qentry = self._inode_remove_queue.get(blocking_get)
+ except queue.Empty:
+ break
+ blocking_get = False
+ if qentry is None:
+ return
+
+ if self._shutdown_started.is_set():
+ continue
+
+ # Process this entry
+ if qentry.inode_op(self, locked_ops):
+ self._inode_remove_queue.task_done()
+
+ # Give up the reference
+ qentry = None
+
+ with llfuse.lock:
+ while locked_ops:
+ if locked_ops.popleft().inode_op(self, None):
+ self._inode_remove_queue.task_done()
+ self.cap_cache_event.clear()
+ for entry in self.inode_cache.evict_candidates():
+ self._remove(entry)
+
+ def wait_remove_queue_empty(self):
+ # used by tests
+ self._inode_remove_queue.join()
+
+ def _remove(self, entry):
+ """Remove an inode entry if possible.
+
+ If the entry is still referenced or in use, don't do anything.
+ If this is not referenced but the parent is still referenced,
+ clear any data held by the object (which may include directory
+ entries under the object) but don't remove it from the inode
+ table.
+
+ """
+ try:
+ if entry.inode is None:
+ # Removed already
+ return
+
+ if entry.inode == llfuse.ROOT_INODE:
+ return
+
+ if entry.in_use():
+ # referenced internally, stay pinned
+ #_logger.debug("InodeCache cannot clear inode %i, in use", entry.inode)
+ return
+
+ # Tell the kernel it should forget about it
+ entry.kernel_invalidate()
+
+ if entry.has_ref():
+ # has kernel reference, could still be accessed.
+ # when the kernel forgets about it, we can delete it.
+ #_logger.debug("InodeCache cannot clear inode %i, is referenced", entry.inode)
+ return
+
+ # commit any pending changes
with llfuse.lock_released:
entry.finalize()
- entry.inode = None
- else:
- entry.dead = True
- _logger.debug("del_entry on inode %i with refcount %i", entry.inode, entry.ref_count)
+
+ # Clear the contents
+ entry.clear()
+
+ if entry.parent_inode is None:
+ _logger.debug("InodeCache forgetting inode %i, object cache_size %i, cache total %i, forget_inode True, inode entries %i, type %s",
+ entry.inode, entry.cache_size, self.inode_cache.total(),
+ len(self._entries), type(entry))
+
+ if entry.cache_uuid:
+ self._by_uuid[entry.cache_uuid].remove(entry)
+ if not self._by_uuid[entry.cache_uuid]:
+ del self._by_uuid[entry.cache_uuid]
+ entry.cache_uuid = None
+
+ self.inode_cache.unmanage(entry)
+
+ del self._entries[entry.inode]
+ entry.inode = None
+
+ except Exception as e:
+ _logger.exception("failed remove")
def invalidate_inode(self, entry):
- if entry.has_ref(False):
+ if entry.has_ref():
# Only necessary if the kernel has previously done a lookup on this
# inode and hasn't yet forgotten about it.
- llfuse.invalidate_inode(entry.inode)
+ self._inode_remove_queue.put(InvalidateInode(entry.inode))
def invalidate_entry(self, entry, name):
- if entry.has_ref(False):
+ if entry.has_ref():
# Only necessary if the kernel has previously done a lookup on this
# inode and hasn't yet forgotten about it.
- llfuse.invalidate_entry(entry.inode, native(name.encode(self.encoding)))
+ self._inode_remove_queue.put(InvalidateEntry(entry.inode, name.encode(self.encoding)))
+
+ def begin_shutdown(self):
+ self._inode_remove_queue.put(None)
+ if self._inode_remove_thread is not None:
+ self._inode_remove_thread.join()
+ self._inode_remove_thread = None
def clear(self):
+ with llfuse.lock_released:
+ self.begin_shutdown()
+
self.inode_cache.clear()
+ self._by_uuid.clear()
- for k,v in viewitems(self._entries):
+ for k,v in self._entries.items():
try:
v.finalize()
except Exception as e:
@@ -317,6 +514,14 @@ class Inodes(object):
self._entries.clear()
+ def forward_slash_subst(self):
+ return self._fsns
+
+ def find_by_uuid(self, uuid):
+ """Return a list of zero or more inode entries corresponding
+ to this Arvados UUID."""
+ return self._by_uuid.get(uuid, [])
+
def catch_exceptions(orig_func):
"""Catch uncaught exceptions and log them consistently."""
@@ -329,6 +534,8 @@ def catch_exceptions(orig_func):
raise
except EnvironmentError as e:
raise llfuse.FUSEError(e.errno)
+ except NotImplementedError:
+ raise llfuse.FUSEError(errno.ENOTSUP)
except arvados.errors.KeepWriteError as e:
_logger.error("Keep write error: " + str(e))
raise llfuse.FUSEError(errno.EIO)
@@ -375,14 +582,32 @@ class Operations(llfuse.Operations):
rename_time = fuse_time.labels(op='rename')
flush_time = fuse_time.labels(op='flush')
- def __init__(self, uid, gid, api_client, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False):
+ def __init__(self, uid, gid, api_client, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False, fsns=None):
super(Operations, self).__init__()
self._api_client = api_client
if not inode_cache:
inode_cache = InodeCache(cap=256*1024*1024)
- self.inodes = Inodes(inode_cache, encoding=encoding)
+
+ if fsns is None:
+ try:
+ fsns = self._api_client.config()["Collections"]["ForwardSlashNameSubstitution"]
+ except KeyError:
+ # old API server with no FSNS config
+ fsns = '_'
+ else:
+ if fsns == '' or fsns == '/':
+ fsns = None
+
+ # If we get overlapping shutdown events (e.g., fusermount -u
+ # -z and operations.destroy()) llfuse calls forget() on inodes
+ # that have already been deleted. To avoid this, we make
+ # forget() a no-op if called after destroy().
+ self._shutdown_started = threading.Event()
+
+ self.inodes = Inodes(inode_cache, encoding=encoding, fsns=fsns,
+ shutdown_started=self._shutdown_started)
self.uid = uid
self.gid = gid
self.enable_write = enable_write
@@ -395,12 +620,6 @@ class Operations(llfuse.Operations):
# is fully initialized should wait() on this event object.
self.initlock = threading.Event()
- # If we get overlapping shutdown events (e.g., fusermount -u
- # -z and operations.destroy()) llfuse calls forget() on inodes
- # that have already been deleted. To avoid this, we make
- # forget() a no-op if called after destroy().
- self._shutdown_started = threading.Event()
-
self.num_retries = num_retries
self.read_counter = arvados.keep.Counter()
@@ -436,23 +655,26 @@ class Operations(llfuse.Operations):
def metric_count_func(self, opname):
return lambda: int(self.metric_value(opname, "arvmount_fuse_operations_seconds_count"))
+ def begin_shutdown(self):
+ self._shutdown_started.set()
+ self.inodes.begin_shutdown()
+
@destroy_time.time()
@catch_exceptions
def destroy(self):
- self._shutdown_started.set()
+ _logger.debug("arv-mount destroy: start")
+
+ with llfuse.lock_released:
+ self.begin_shutdown()
+
if self.events:
self.events.close()
self.events = None
- # Different versions of llfuse require and forbid us to
- # acquire the lock here. See #8345#note-37, #10805#note-9.
- if LLFUSE_VERSION_0 and llfuse.lock.acquire():
- # llfuse < 0.42
- self.inodes.clear()
- llfuse.lock.release()
- else:
- # llfuse >= 0.42
- self.inodes.clear()
+ self.inodes.clear()
+
+ _logger.debug("arv-mount destroy: complete")
+
def access(self, inode, mode, ctx):
return True
@@ -473,28 +695,34 @@ class Operations(llfuse.Operations):
old_attrs = properties.get("old_attributes") or {}
new_attrs = properties.get("new_attributes") or {}
- for item in self.inodes.inode_cache.find_by_uuid(ev["object_uuid"]):
+ for item in self.inodes.find_by_uuid(ev["object_uuid"]):
item.invalidate()
oldowner = old_attrs.get("owner_uuid")
newowner = ev.get("object_owner_uuid")
for parent in (
- self.inodes.inode_cache.find_by_uuid(oldowner) +
- self.inodes.inode_cache.find_by_uuid(newowner)):
+ self.inodes.find_by_uuid(oldowner) +
+ self.inodes.find_by_uuid(newowner)):
parent.invalidate()
@getattr_time.time()
@catch_exceptions
def getattr(self, inode, ctx=None):
if inode not in self.inodes:
+ _logger.debug("arv-mount getattr: inode %i missing", inode)
raise llfuse.FUSEError(errno.ENOENT)
e = self.inodes[inode]
+ self.inodes.touch(e)
+ parent = None
+ if e.parent_inode:
+ parent = self.inodes[e.parent_inode]
+ self.inodes.touch(parent)
entry = llfuse.EntryAttributes()
entry.st_ino = inode
entry.generation = 0
- entry.entry_timeout = 0
+ entry.entry_timeout = parent.time_to_next_poll() if parent is not None else 0
entry.attr_timeout = e.time_to_next_poll() if e.allow_attr_cache else 0
entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
@@ -562,18 +790,23 @@ class Operations(llfuse.Operations):
if name == '.':
inode = parent_inode
- else:
- if parent_inode in self.inodes:
- p = self.inodes[parent_inode]
- self.inodes.touch(p)
- if name == '..':
- inode = p.parent_inode
- elif isinstance(p, Directory) and name in p:
- inode = p[name].inode
+ elif parent_inode in self.inodes:
+ p = self.inodes[parent_inode]
+ self.inodes.touch(p)
+ if name == '..':
+ inode = p.parent_inode
+ elif isinstance(p, Directory) and name in p:
+ if p[name].inode is None:
+ _logger.debug("arv-mount lookup: parent_inode %i name '%s' found but inode was None",
+ parent_inode, name)
+ raise llfuse.FUSEError(errno.ENOENT)
+
+ inode = p[name].inode
if inode != None:
_logger.debug("arv-mount lookup: parent_inode %i name '%s' inode %i",
parent_inode, name, inode)
+ self.inodes.touch(self.inodes[inode])
self.inodes[inode].inc_ref()
return self.getattr(inode)
else:
@@ -589,7 +822,7 @@ class Operations(llfuse.Operations):
for inode, nlookup in inodes:
ent = self.inodes[inode]
_logger.debug("arv-mount forget: inode %i nlookup %i ref_count %i", inode, nlookup, ent.ref_count)
- if ent.dec_ref(nlookup) == 0 and ent.dead:
+ if ent.dec_ref(nlookup) == 0 and ent.parent_inode is None:
self.inodes.del_entry(ent)
@open_time.time()
@@ -598,6 +831,7 @@ class Operations(llfuse.Operations):
if inode in self.inodes:
p = self.inodes[inode]
else:
+ _logger.debug("arv-mount open: inode %i missing", inode)
raise llfuse.FUSEError(errno.ENOENT)
if isinstance(p, Directory):
@@ -679,7 +913,7 @@ class Operations(llfuse.Operations):
finally:
self._filehandles[fh].release()
del self._filehandles[fh]
- self.inodes.inode_cache.cap_cache()
+ self.inodes.cap_cache()
def releasedir(self, fh):
self.release(fh)
@@ -692,6 +926,7 @@ class Operations(llfuse.Operations):
if inode in self.inodes:
p = self.inodes[inode]
else:
+ _logger.debug("arv-mount opendir: called with unknown or removed inode %i", inode)
raise llfuse.FUSEError(errno.ENOENT)
if not isinstance(p, Directory):
@@ -701,11 +936,16 @@ class Operations(llfuse.Operations):
if p.parent_inode in self.inodes:
parent = self.inodes[p.parent_inode]
else:
+ _logger.warning("arv-mount opendir: parent inode %i of %i is missing", p.parent_inode, inode)
raise llfuse.FUSEError(errno.EIO)
+ _logger.debug("arv-mount opendir: inode %i fh %i ", inode, fh)
+
# update atime
+ p.inc_use()
+ self._filehandles[fh] = DirectoryHandle(fh, p, [('.', p), ('..', parent)] + p.items())
+ p.dec_use()
self.inodes.touch(p)
- self._filehandles[fh] = DirectoryHandle(fh, p, [('.', p), ('..', parent)] + listitems(p))
return fh
@readdir_time.time()
@@ -720,8 +960,9 @@ class Operations(llfuse.Operations):
e = off
while e < len(handle.entries):
- if handle.entries[e][1].inode in self.inodes:
- yield (handle.entries[e][0].encode(self.inodes.encoding), self.getattr(handle.entries[e][1].inode), e+1)
+ ent = handle.entries[e]
+ if ent[1].inode in self.inodes:
+ yield (ent[0].encode(self.inodes.encoding), self.getattr(ent[1].inode), e+1)
e += 1
@statfs_time.time()
diff --git a/services/fuse/arvados_fuse/command.py b/services/fuse/arvados_fuse/command.py
index 5f0a1f80f6..f52121d862 100644
--- a/services/fuse/arvados_fuse/command.py
+++ b/services/fuse/arvados_fuse/command.py
@@ -16,6 +16,7 @@ import signal
import subprocess
import sys
import time
+import resource
import arvados.commands._util as arv_cmd
from arvados_fuse import crunchstat
@@ -27,93 +28,336 @@ class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
super(ArgumentParser, self).__init__(
parents=[arv_cmd.retry_opt],
- description='''Mount Keep data under the local filesystem. Default mode is --home''',
- epilog="""
- Note: When using the --exec feature, you must either specify the
- mountpoint before --exec, or mark the end of your --exec arguments
- with "--".
- """)
- self.add_argument('--version', action='version',
- version=u"%s %s" % (sys.argv[0], __version__),
- help='Print version and exit.')
- self.add_argument('mountpoint', type=str, help="""Mount point.""")
- self.add_argument('--allow-other', action='store_true',
- help="""Let other users read the mount""")
- self.add_argument('--subtype', type=str, metavar='STRING',
- help="""Report mounted filesystem type as "fuse.STRING", instead of just "fuse".""")
-
- mode = self.add_mutually_exclusive_group()
-
- mode.add_argument('--all', action='store_const', const='all', dest='mode',
- help="""Mount a subdirectory for each mode: home, shared, by_tag, by_id (default if no --mount-* arguments are given).""")
- mode.add_argument('--custom', action='store_const', const=None, dest='mode',
- help="""Mount a top level meta-directory with subdirectories as specified by additional --mount-* arguments (default if any --mount-* arguments are given).""")
- mode.add_argument('--home', action='store_const', const='home', dest='mode',
- help="""Mount only the user's home project.""")
- mode.add_argument('--shared', action='store_const', const='shared', dest='mode',
- help="""Mount only list of projects shared with the user.""")
- mode.add_argument('--by-tag', action='store_const', const='by_tag', dest='mode',
- help="""Mount subdirectories listed by tag.""")
- mode.add_argument('--by-id', action='store_const', const='by_id', dest='mode',
- help="""Mount subdirectories listed by portable data hash or uuid.""")
- mode.add_argument('--by-pdh', action='store_const', const='by_pdh', dest='mode',
- help="""Mount subdirectories listed by portable data hash.""")
- mode.add_argument('--project', type=str, metavar='UUID',
- help="""Mount the specified project.""")
- mode.add_argument('--collection', type=str, metavar='UUID_or_PDH',
- help="""Mount only the specified collection.""")
-
- mounts = self.add_argument_group('Custom mount options')
- mounts.add_argument('--mount-by-pdh',
- type=str, metavar='PATH', action='append', default=[],
- help="Mount each readable collection at mountpoint/PATH/P where P is the collection's portable data hash.")
- mounts.add_argument('--mount-by-id',
- type=str, metavar='PATH', action='append', default=[],
- help="Mount each readable collection at mountpoint/PATH/UUID and mountpoint/PATH/PDH where PDH is the collection's portable data hash and UUID is its UUID.")
- mounts.add_argument('--mount-by-tag',
- type=str, metavar='PATH', action='append', default=[],
- help="Mount all collections with tag TAG at mountpoint/PATH/TAG/UUID.")
- mounts.add_argument('--mount-home',
- type=str, metavar='PATH', action='append', default=[],
- help="Mount the current user's home project at mountpoint/PATH.")
- mounts.add_argument('--mount-shared',
- type=str, metavar='PATH', action='append', default=[],
- help="Mount projects shared with the current user at mountpoint/PATH.")
- mounts.add_argument('--mount-tmp',
- type=str, metavar='PATH', action='append', default=[],
- help="Create a new collection, mount it in read/write mode at mountpoint/PATH, and delete it when unmounting.")
-
- self.add_argument('--debug', action='store_true', help="""Debug mode""")
- self.add_argument('--logfile', help="""Write debug logs and errors to the specified file (default stderr).""")
- self.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
- self.add_argument('--encoding', type=str, help="Character encoding to use for filesystem, default is utf-8 (see Python codec registry for list of available encodings)", default="utf-8")
-
- self.add_argument('--file-cache', type=int, help="File data cache size, in bytes (default 256MiB)", default=256*1024*1024)
- self.add_argument('--directory-cache', type=int, help="Directory data cache size, in bytes (default 128MiB)", default=128*1024*1024)
-
- self.add_argument('--disable-event-listening', action='store_true', help="Don't subscribe to events on the API server", dest="disable_event_listening", default=False)
-
- self.add_argument('--read-only', action='store_false', help="Mount will be read only (default)", dest="enable_write", default=False)
- self.add_argument('--read-write', action='store_true', help="Mount will be read-write", dest="enable_write", default=False)
- self.add_argument('--storage-classes', type=str, metavar='CLASSES', help="Specify comma separated list of storage classes to be used when saving data of new collections", default=None)
-
- self.add_argument('--crunchstat-interval', type=float, help="Write stats to stderr every N seconds (default disabled)", default=0)
-
- unmount = self.add_mutually_exclusive_group()
- unmount.add_argument('--unmount', action='store_true', default=False,
- help="Forcefully unmount the specified mountpoint (if it's a fuse mount) and exit. If --subtype is given, unmount only if the mount has the specified subtype. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
- unmount.add_argument('--unmount-all', action='store_true', default=False,
- help="Forcefully unmount every fuse mount at or below the specified path and exit. If --subtype is given, unmount only mounts that have the specified subtype. Exit non-zero if any other types of mounts are found at or below the given path. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
- unmount.add_argument('--replace', action='store_true', default=False,
- help="If a fuse mount is already present at mountpoint, forcefully unmount it before mounting")
- self.add_argument('--unmount-timeout',
- type=float, default=2.0,
- help="Time to wait for graceful shutdown after --exec program exits and filesystem is unmounted")
-
- self.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
- dest="exec_args", metavar=('command', 'args', '...', '--'),
- help="""Mount, run a command, then unmount and exit""")
-
+ description="Interact with Arvados data through a local filesystem",
+ )
+ self.add_argument(
+ '--version',
+ action='version',
+ version=u"%s %s" % (sys.argv[0], __version__),
+ help="Print version and exit",
+ )
+ self.add_argument(
+ 'mountpoint',
+ metavar='MOUNT_DIR',
+ help="Directory path to mount data",
+ )
+
+ mode_group = self.add_argument_group("Mount contents")
+ mode = mode_group.add_mutually_exclusive_group()
+ mode.add_argument(
+ '--all',
+ action='store_const',
+ const='all',
+ dest='mode',
+ help="""
+Mount a subdirectory for each mode: `home`, `shared`, `by_id`, and `by_tag`
+(default if no `--mount-*` options are given)
+""",
+ )
+ mode.add_argument(
+ '--custom',
+ action='store_const',
+ const=None,
+ dest='mode',
+ help="""
+Mount a subdirectory for each mode specified by a `--mount-*` option
+(default if any `--mount-*` options are given;
+see "Mount custom layout and filtering" section)
+""",
+ )
+ mode.add_argument(
+ '--collection',
+ metavar='UUID_OR_PDH',
+ help="Mount the specified collection",
+ )
+ mode.add_argument(
+ '--home',
+ action='store_const',
+ const='home',
+ dest='mode',
+ help="Mount your home project",
+ )
+ mode.add_argument(
+ '--project',
+ metavar='UUID',
+ help="Mount the specified project",
+ )
+ mode.add_argument(
+ '--shared',
+ action='store_const',
+ const='shared',
+ dest='mode',
+ help="Mount a subdirectory for each project shared with you",
+ )
+ mode.add_argument(
+ '--by-id',
+ action='store_const',
+ const='by_id',
+ dest='mode',
+ help="""
+Mount a magic directory where collections and projects are accessible through
+subdirectories named after their UUID or portable data hash
+""",
+ )
+ mode.add_argument(
+ '--by-pdh',
+ action='store_const',
+ const='by_pdh',
+ dest='mode',
+ help="""
+Mount a magic directory where collections are accessible through
+subdirectories named after their portable data hash
+""",
+ )
+ mode.add_argument(
+ '--by-tag',
+ action='store_const',
+ const='by_tag',
+ dest='mode',
+ help="Mount a subdirectory for each tag attached to a collection or project",
+ )
+
+ mounts = self.add_argument_group("Mount custom layout and filtering")
+ mounts.add_argument(
+ '--filters',
+ type=arv_cmd.JSONArgument(arv_cmd.validate_filters),
+ help="""
+Filters to apply to all project, shared, and tag directory contents.
+Pass filters as either a JSON string or a path to a JSON file.
+The JSON object should be a list of filters in Arvados API list filter syntax.
+""",
+ )
+ mounts.add_argument(
+ '--mount-home',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="Make your home project available under the mount at `PATH`",
+ )
+ mounts.add_argument(
+ '--mount-shared',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="Make projects shared with you available under the mount at `PATH`",
+ )
+ mounts.add_argument(
+ '--mount-tmp',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="""
+Make a new temporary writable collection available under the mount at `PATH`.
+This collection is deleted when the mount is unmounted.
+""",
+ )
+ mounts.add_argument(
+ '--mount-by-id',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="""
+Make a magic directory available under the mount at `PATH` where collections and
+projects are accessible through subdirectories named after their UUID or
+portable data hash
+""",
+ )
+ mounts.add_argument(
+ '--mount-by-pdh',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="""
+Make a magic directory available under the mount at `PATH` where collections
+are accessible through subdirectories named after portable data hash
+""",
+ )
+ mounts.add_argument(
+ '--mount-by-tag',
+ metavar='PATH',
+ action='append',
+ default=[],
+ help="""
+Make a subdirectory for each tag attached to a collection or project available
+under the mount at `PATH`
+""" ,
+ )
+
+ perms = self.add_argument_group("Mount access and permissions")
+ perms.add_argument(
+ '--allow-other',
+ action='store_true',
+ help="Let other users on this system read mounted data (default false)",
+ )
+ perms.add_argument(
+ '--read-only',
+ action='store_false',
+ default=False,
+ dest='enable_write',
+ help="Mounted data cannot be modified from the mount (default)",
+ )
+ perms.add_argument(
+ '--read-write',
+ action='store_true',
+ default=False,
+ dest='enable_write',
+ help="Mounted data can be modified from the mount",
+ )
+
+ lifecycle = self.add_argument_group("Mount lifecycle management")
+ lifecycle.add_argument(
+ '--exec',
+ nargs=argparse.REMAINDER,
+ dest="exec_args",
+ help="""
+Mount data, run the specified command, then unmount and exit.
+`--exec` reads all remaining options as the command to run,
+so it must be the last option you specify.
+Either end your command arguments (and other options) with a `--` argument,
+or specify `--exec` after your mount point.
+""",
+ )
+ lifecycle.add_argument(
+ '--foreground',
+ action='store_true',
+ default=False,
+ help="Run mount process in the foreground instead of daemonizing (default false)",
+ )
+ lifecycle.add_argument(
+ '--subtype',
+ help="Set mounted filesystem type to `fuse.SUBTYPE` (default is just `fuse`)",
+ )
+ unmount = lifecycle.add_mutually_exclusive_group()
+ unmount.add_argument(
+ '--replace',
+ action='store_true',
+ default=False,
+ help="""
+If a FUSE mount is already mounted at the given directory,
+unmount it before mounting the requested data.
+If `--subtype` is specified, unmount only if the mount has that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+ )
+ unmount.add_argument(
+ '--unmount',
+ action='store_true',
+ default=False,
+ help="""
+If a FUSE mount is already mounted at the given directory, unmount it and exit.
+If `--subtype` is specified, unmount only if the mount has that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+ )
+ unmount.add_argument(
+ '--unmount-all',
+ action='store_true',
+ default=False,
+ help="""
+Unmount all FUSE mounts at or below the given directory, then exit.
+If `--subtype` is specified, unmount only if the mount has that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+ )
+ lifecycle.add_argument(
+ '--unmount-timeout',
+ type=float,
+ default=2.0,
+ metavar='SECONDS',
+ help="""
+The number of seconds to wait for a clean unmount after an `--exec` command has
+exited (default %(default).01f).
+After this time, the mount will be forcefully unmounted.
+""",
+ )
+
+ reporting = self.add_argument_group("Mount logging and statistics")
+ reporting.add_argument(
+ '--crunchstat-interval',
+ type=float,
+ default=0.0,
+ metavar='SECONDS',
+ help="Write stats to stderr every N seconds (default disabled)",
+ )
+ reporting.add_argument(
+ '--debug',
+ action='store_true',
+ help="Log debug information",
+ )
+ reporting.add_argument(
+ '--logfile',
+ help="Write debug logs and errors to the specified file (default stderr)",
+ )
+
+ cache = self.add_argument_group("Mount local cache setup")
+ cachetype = cache.add_mutually_exclusive_group()
+ cachetype.add_argument(
+ '--disk-cache',
+ action='store_true',
+ default=True,
+ dest='disk_cache',
+ help="Cache data on the local filesystem (default)",
+ )
+ cachetype.add_argument(
+ '--ram-cache',
+ action='store_false',
+ default=True,
+ dest='disk_cache',
+ help="Cache data in memory",
+ )
+ cache.add_argument(
+ '--disk-cache-dir',
+ metavar="DIRECTORY",
+ help="Filesystem cache location (default `~/.cache/arvados/keep`)",
+ )
+ cache.add_argument(
+ '--directory-cache',
+ type=int,
+ default=128*1024*1024,
+ metavar='BYTES',
+ help="Size of directory data cache in bytes (default 128 MiB)",
+ )
+ cache.add_argument(
+ '--file-cache',
+ type=int,
+ default=0,
+ metavar='BYTES',
+ help="""
+Size of file data cache in bytes
+(default 8 GiB for filesystem cache, 256 MiB for memory cache)
+""",
+ )
+
+ plumbing = self.add_argument_group("Mount interactions with Arvados and Linux")
+ plumbing.add_argument(
+ '--disable-event-listening',
+ action='store_true',
+ dest='disable_event_listening',
+ default=False,
+ help="Don't subscribe to events on the API server to update mount contents",
+ )
+ plumbing.add_argument(
+ '--encoding',
+ default="utf-8",
+ help="""
+Filesystem character encoding
+(default %(default)r; specify a name from the Python codec registry)
+""",
+ )
+ plumbing.add_argument(
+ '--storage-classes',
+ metavar='CLASSES',
+ help="Comma-separated list of storage classes to request for new collections",
+ )
+ # This is a hidden argument used by tests. Normally this
+ # value will be extracted from the cluster config, but mocking
+ # the cluster config under the presence of multiple threads
+ # and processes turned out to be too complicated and brittle.
+ plumbing.add_argument(
+ '--fsns',
+ type=str,
+ default=None,
+ help=argparse.SUPPRESS)
class Mount(object):
def __init__(self, args, logger=logging.getLogger('arvados.arv-mount')):
@@ -128,10 +372,38 @@ class Mount(object):
try:
self._setup_logging()
+ except Exception as e:
+ self.logger.exception("exception during setup: %s", e)
+ exit(1)
+
+ try:
+ nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ minlimit = 10240
+ if self.args.file_cache:
+ # Adjust the file handle limit so it can meet
+ # the desired cache size. Multiply by 8 because the
+ # number of 64 MiB cache slots that keepclient
+ # allocates is RLIMIT_NOFILE / 8
+ minlimit = int((self.args.file_cache/(64*1024*1024)) * 8)
+
+ if nofile_limit[0] < minlimit:
+ resource.setrlimit(resource.RLIMIT_NOFILE, (min(minlimit, nofile_limit[1]), nofile_limit[1]))
+
+ if minlimit > nofile_limit[1]:
+ self.logger.warning("file handles required to meet --file-cache (%s) exceeds hard file handle limit (%s), cache size will be smaller than requested", minlimit, nofile_limit[1])
+
+ except Exception as e:
+ self.logger.warning("unable to adjust file handle limit: %s", e)
+
+ nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ self.logger.info("file cache capped at %s bytes or less based on available disk (RLIMIT_NOFILE is %s)", ((nofile_limit[0]//8)*64*1024*1024), nofile_limit)
+
+ try:
self._setup_api()
self._setup_mount()
except Exception as e:
- self.logger.exception("arv-mount: exception during setup: %s", e)
+ self.logger.exception("exception during setup: %s", e)
exit(1)
def __enter__(self):
@@ -211,12 +483,24 @@ class Mount(object):
def _setup_api(self):
try:
+ # default value of file_cache is 0, this tells KeepBlockCache to
+ # choose a default based on whether disk_cache is enabled or not.
+
+ block_cache = arvados.keep.KeepBlockCache(cache_max=self.args.file_cache,
+ disk_cache=self.args.disk_cache,
+ disk_cache_dir=self.args.disk_cache_dir)
+
self.api = arvados.safeapi.ThreadSafeApiCache(
apiconfig=arvados.config.settings(),
+ api_params={
+ 'num_retries': self.args.retries,
+ },
keep_params={
- 'block_cache': arvados.keep.KeepBlockCache(self.args.file_cache),
+ 'block_cache': block_cache,
'num_retries': self.args.retries,
- })
+ },
+ version='v1',
+ )
except KeyError as e:
self.logger.error("Missing environment: %s", e)
exit(1)
@@ -230,7 +514,8 @@ class Mount(object):
api_client=self.api,
encoding=self.args.encoding,
inode_cache=InodeCache(cap=self.args.directory_cache),
- enable_write=self.args.enable_write)
+ enable_write=self.args.enable_write,
+ fsns=self.args.fsns)
if self.args.crunchstat_interval:
statsthread = threading.Thread(
@@ -244,7 +529,14 @@ class Mount(object):
usr = self.api.users().current().execute(num_retries=self.args.retries)
now = time.time()
dir_class = None
- dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
+ dir_args = [
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.api,
+ self.args.retries,
+ self.args.enable_write,
+ self.args.filters,
+ ]
mount_readme = False
storage_classes = None
@@ -310,7 +602,11 @@ class Mount(object):
return
e = self.operations.inodes.add_entry(Directory(
- llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.args.enable_write,
+ self.args.filters,
+ ))
dir_args[0] = e.inode
for name in self.args.mount_by_id:
@@ -392,8 +688,9 @@ From here, the following directories are available:
def _llfuse_main(self):
try:
- llfuse.main()
+ llfuse.main(workers=10)
except:
llfuse.close(unmount=False)
raise
+ self.operations.begin_shutdown()
llfuse.close()
diff --git a/services/fuse/arvados_fuse/fresh.py b/services/fuse/arvados_fuse/fresh.py
index 53214ee94d..508ee7fb73 100644
--- a/services/fuse/arvados_fuse/fresh.py
+++ b/services/fuse/arvados_fuse/fresh.py
@@ -62,7 +62,7 @@ class FreshBase(object):
"""
__slots__ = ("_stale", "_poll", "_last_update", "_atime", "_poll_time", "use_count",
- "ref_count", "dead", "cache_size", "cache_uuid", "allow_attr_cache")
+ "ref_count", "cache_size", "cache_uuid", "allow_attr_cache")
def __init__(self):
self._stale = True
@@ -72,7 +72,6 @@ class FreshBase(object):
self._poll_time = 60
self.use_count = 0
self.ref_count = 0
- self.dead = False
self.cache_size = 0
self.cache_uuid = None
@@ -125,17 +124,11 @@ class FreshBase(object):
self.ref_count -= n
return self.ref_count
- def has_ref(self, only_children):
+ def has_ref(self):
"""Determine if there are any kernel references to this
- object or its children.
-
- If only_children is True, ignore refcount of self and only consider
- children.
+ object.
"""
- if only_children:
- return False
- else:
- return self.ref_count > 0
+ return self.ref_count > 0
def objsize(self):
return 0
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index f3816c0d3e..9c78805107 100644
--- a/services/fuse/arvados_fuse/fusedir.py
+++ b/services/fuse/arvados_fuse/fusedir.py
@@ -26,7 +26,7 @@ _logger = logging.getLogger('arvados.arvados_fuse')
# Match any character which FUSE or Linux cannot accommodate as part
# of a filename. (If present in a collection filename, they will
# appear as underscores in the fuse mount.)
-_disallowed_filename_characters = re.compile('[\x00/]')
+_disallowed_filename_characters = re.compile(r'[\x00/]')
class Directory(FreshBase):
@@ -36,7 +36,9 @@ class Directory(FreshBase):
and the value referencing a File or Directory object.
"""
- def __init__(self, parent_inode, inodes, apiconfig, enable_write):
+ __slots__ = ("inode", "parent_inode", "inodes", "_entries", "_mtime", "_enable_write", "_filters")
+
+ def __init__(self, parent_inode, inodes, enable_write, filters):
"""parent_inode is the integer inode number"""
super(Directory, self).__init__()
@@ -46,28 +48,26 @@ class Directory(FreshBase):
raise Exception("parent_inode should be an int")
self.parent_inode = parent_inode
self.inodes = inodes
- self.apiconfig = apiconfig
self._entries = {}
self._mtime = time.time()
self._enable_write = enable_write
-
- def forward_slash_subst(self):
- if not hasattr(self, '_fsns'):
- self._fsns = None
- config = self.apiconfig()
- try:
- self._fsns = config["Collections"]["ForwardSlashNameSubstitution"]
- except KeyError:
- # old API server with no FSNS config
- self._fsns = '_'
+ self._filters = filters or []
+
+ def _filters_for(self, subtype, *, qualified):
+ for f in self._filters:
+ f_type, _, f_name = f[0].partition('.')
+ if not f_name:
+ yield f
+ elif f_type != subtype:
+ pass
+ elif qualified:
+ yield f
else:
- if self._fsns == '' or self._fsns == '/':
- self._fsns = None
- return self._fsns
+ yield [f_name, *f[1:]]
def unsanitize_filename(self, incoming):
"""Replace ForwardSlashNameSubstitution value with /"""
- fsns = self.forward_slash_subst()
+ fsns = self.inodes.forward_slash_subst()
if isinstance(fsns, str):
return incoming.replace(fsns, '/')
else:
@@ -86,7 +86,7 @@ class Directory(FreshBase):
elif dirty == '..':
return '__'
else:
- fsns = self.forward_slash_subst()
+ fsns = self.inodes.forward_slash_subst()
if isinstance(fsns, str):
dirty = dirty.replace('/', fsns)
return _disallowed_filename_characters.sub('_', dirty)
@@ -137,6 +137,10 @@ class Directory(FreshBase):
self.inodes.touch(self)
super(Directory, self).fresh()
+ def objsize(self):
+ # Rough estimate of memory footprint based on using pympler
+ return len(self._entries) * 1024
+
def merge(self, items, fn, same, new_entry):
"""Helper method for updating the contents of the directory.
@@ -144,16 +148,17 @@ class Directory(FreshBase):
entries that are the same in both the old and new lists, create new
entries, and delete old entries missing from the new list.
- :items: iterable with new directory contents
+ Arguments:
+ * items: Iterable --- New directory contents
- :fn: function to take an entry in 'items' and return the desired file or
+ * fn: Callable --- Takes an entry in 'items' and return the desired file or
directory name, or None if this entry should be skipped
- :same: function to compare an existing entry (a File or Directory
+ * same: Callable --- Compare an existing entry (a File or Directory
object) with an entry in the items list to determine whether to keep
the existing entry.
- :new_entry: function to create a new directory entry (File or Directory
+ * new_entry: Callable --- Create a new directory entry (File or Directory
object) from an entry in the items list.
"""
@@ -163,29 +168,43 @@ class Directory(FreshBase):
changed = False
for i in items:
name = self.sanitize_filename(fn(i))
- if name:
- if name in oldentries and same(oldentries[name], i):
+ if not name:
+ continue
+ if name in oldentries:
+ ent = oldentries[name]
+ if same(ent, i) and ent.parent_inode == self.inode:
# move existing directory entry over
- self._entries[name] = oldentries[name]
+ self._entries[name] = ent
del oldentries[name]
- else:
- _logger.debug("Adding entry '%s' to inode %i", name, self.inode)
- # create new directory entry
- ent = new_entry(i)
- if ent is not None:
- self._entries[name] = self.inodes.add_entry(ent)
- changed = True
+ self.inodes.inode_cache.touch(ent)
+
+ for i in items:
+ name = self.sanitize_filename(fn(i))
+ if not name:
+ continue
+ if name not in self._entries:
+ # create new directory entry
+ ent = new_entry(i)
+ if ent is not None:
+ self._entries[name] = self.inodes.add_entry(ent)
+ # need to invalidate this just in case there was a
+ # previous entry that couldn't be moved over or a
+ # lookup that returned file not found and cached
+ # a negative result
+ self.inodes.invalidate_entry(self, name)
+ changed = True
+ _logger.debug("Added entry '%s' as inode %i to parent inode %i", name, ent.inode, self.inode)
# delete any other directory entries that were not in found in 'items'
- for i in oldentries:
- _logger.debug("Forgetting about entry '%s' on inode %i", i, self.inode)
- self.inodes.invalidate_entry(self, i)
- self.inodes.del_entry(oldentries[i])
+ for name, ent in oldentries.items():
+ _logger.debug("Detaching entry '%s' from parent_inode %i", name, self.inode)
+ self.inodes.invalidate_entry(self, name)
+ self.inodes.del_entry(ent)
changed = True
if changed:
- self.inodes.invalidate_inode(self)
self._mtime = time.time()
+ self.inodes.inode_cache.update_cache_size(self)
self.fresh()
@@ -197,27 +216,27 @@ class Directory(FreshBase):
return True
return False
- def has_ref(self, only_children):
- if super(Directory, self).has_ref(only_children):
- return True
- for v in self._entries.values():
- if v.has_ref(False):
- return True
- return False
-
def clear(self):
"""Delete all entries"""
+ if not self._entries:
+ return
oldentries = self._entries
self._entries = {}
- for n in oldentries:
- oldentries[n].clear()
- self.inodes.del_entry(oldentries[n])
self.invalidate()
+ for name, ent in oldentries.items():
+ ent.clear()
+ self.inodes.invalidate_entry(self, name)
+ self.inodes.del_entry(ent)
+ self.inodes.inode_cache.update_cache_size(self)
def kernel_invalidate(self):
# Invalidating the dentry on the parent implies invalidating all paths
# below it as well.
- parent = self.inodes[self.parent_inode]
+ if self.parent_inode in self.inodes:
+ parent = self.inodes[self.parent_inode]
+ else:
+ # parent was removed already.
+ return
# Find self on the parent in order to invalidate this path.
# Calling the public items() method might trigger a refresh,
@@ -270,9 +289,10 @@ class CollectionDirectoryBase(Directory):
"""
- def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection, collection_root):
- super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
- self.apiconfig = apiconfig
+ __slots__ = ("collection", "collection_root", "collection_record_file")
+
+ def __init__(self, parent_inode, inodes, enable_write, filters, collection, collection_root):
+ super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, enable_write, filters)
self.collection = collection
self.collection_root = collection_root
self.collection_record_file = None
@@ -280,14 +300,21 @@ class CollectionDirectoryBase(Directory):
def new_entry(self, name, item, mtime):
name = self.sanitize_filename(name)
if hasattr(item, "fuse_entry") and item.fuse_entry is not None:
- if item.fuse_entry.dead is not True:
- raise Exception("Can only reparent dead inode entry")
+ if item.fuse_entry.parent_inode is not None:
+ raise Exception("Can only reparent unparented inode entry")
if item.fuse_entry.inode is None:
raise Exception("Reparented entry must still have valid inode")
- item.fuse_entry.dead = False
+ item.fuse_entry.parent_inode = self.inode
self._entries[name] = item.fuse_entry
elif isinstance(item, arvados.collection.RichCollectionBase):
- self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item, self.collection_root))
+ self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(
+ self.inode,
+ self.inodes,
+ self._enable_write,
+ self._filters,
+ item,
+ self.collection_root,
+ ))
self._entries[name].populate(mtime)
else:
self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
@@ -428,14 +455,23 @@ class CollectionDirectoryBase(Directory):
def clear(self):
super(CollectionDirectoryBase, self).clear()
+ if self.collection is not None:
+ self.collection.unsubscribe()
self.collection = None
+ def objsize(self):
+ # objsize for the whole collection is represented at the root,
+ # don't double-count it
+ return 0
class CollectionDirectory(CollectionDirectoryBase):
"""Represents the root of a directory tree representing a collection."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
- super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None, self)
+ __slots__ = ("api", "num_retries", "collection_locator",
+ "_manifest_size", "_writable", "_updating_lock")
+
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters=None, collection_record=None, explicit_collection=None):
+ super(CollectionDirectory, self).__init__(parent_inode, inodes, enable_write, filters, None, self)
self.api = api
self.num_retries = num_retries
self._poll = True
@@ -493,7 +529,10 @@ class CollectionDirectory(CollectionDirectoryBase):
if self.collection_record_file is not None:
self.collection_record_file.invalidate()
self.inodes.invalidate_inode(self.collection_record_file)
- _logger.debug("%s invalidated collection record file", self)
+ _logger.debug("parent_inode %s invalidated collection record file inode %s", self.inode,
+ self.collection_record_file.inode)
+ self.inodes.update_uuid(self)
+ self.inodes.inode_cache.update_cache_size(self)
self.fresh()
def uuid(self):
@@ -525,23 +564,15 @@ class CollectionDirectory(CollectionDirectoryBase):
self.collection.update()
new_collection_record = self.collection.api_response()
else:
- # If there's too many prefetch threads and you
- # max out the CPU, delivering data to the FUSE
- # layer actually ends up being slower.
- # Experimentally, capping 7 threads seems to
- # be a sweet spot.
- get_threads = min(max((self.api.keep.block_cache.cache_max // (64 * 1024 * 1024)) - 1, 1), 7)
# Create a new collection object
if uuid_pattern.match(self.collection_locator):
coll_reader = arvados.collection.Collection(
self.collection_locator, self.api, self.api.keep,
- num_retries=self.num_retries,
- get_threads=get_threads)
+ num_retries=self.num_retries)
else:
coll_reader = arvados.collection.CollectionReader(
self.collection_locator, self.api, self.api.keep,
- num_retries=self.num_retries,
- get_threads=get_threads)
+ num_retries=self.num_retries)
new_collection_record = coll_reader.api_response() or {}
# If the Collection only exists in Keep, there will be no API
# response. Fill in the fields we need.
@@ -579,6 +610,7 @@ class CollectionDirectory(CollectionDirectoryBase):
return False
@use_counter
+ @check_update
def collection_record(self):
self.flush()
return self.collection.api_response()
@@ -612,22 +644,32 @@ class CollectionDirectory(CollectionDirectoryBase):
return (self.collection_locator is not None)
def objsize(self):
- # This is an empirically-derived heuristic to estimate the memory used
- # to store this collection's metadata. Calculating the memory
- # footprint directly would be more accurate, but also more complicated.
- return self._manifest_size * 128
+ # This is a rough guess of the amount of overhead involved for
+ # a collection; the assumptions are that that each file
+ # averages 128 bytes in the manifest, but consume 1024 bytes
+ # of Python data structures, so 1024/128=8 means we estimate
+ # the RAM footprint at 8 times the size of bare manifest text.
+ return self._manifest_size * 8
def finalize(self):
- if self.collection is not None:
- if self.writable():
+ if self.collection is None:
+ return
+
+ if self.writable():
+ try:
self.collection.save()
- self.collection.stop_threads()
+ except Exception as e:
+ _logger.exception("Failed to save collection %s", self.collection_locator)
+ self.collection.stop_threads()
def clear(self):
if self.collection is not None:
self.collection.stop_threads()
- super(CollectionDirectory, self).clear()
self._manifest_size = 0
+ super(CollectionDirectory, self).clear()
+ if self.collection_record_file is not None:
+ self.inodes.del_entry(self.collection_record_file)
+ self.collection_record_file = None
class TmpCollectionDirectory(CollectionDirectoryBase):
@@ -645,7 +687,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
def save_new(self):
pass
- def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
+ def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, filters=None, storage_classes=None):
collection = self.UnsaveableCollection(
api_client=api_client,
keep_client=api_client.keep,
@@ -654,7 +696,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
# This is always enable_write=True because it never tries to
# save to the backend
super(TmpCollectionDirectory, self).__init__(
- parent_inode, inodes, api_client.config, True, collection, self)
+ parent_inode, inodes, True, filters, collection, self)
self.populate(self.mtime())
def on_event(self, *args, **kwargs):
@@ -676,7 +718,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
with self.collection.lock:
self.collection_record_file.invalidate()
self.inodes.invalidate_inode(self.collection_record_file)
- _logger.debug("%s invalidated collection record", self)
+ _logger.debug("%s invalidated collection record", self.inode)
finally:
while lockcount > 0:
self.collection.lock.acquire()
@@ -750,8 +792,8 @@ and the directory will appear if it exists.
""".lstrip()
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
- super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, pdh_only=False, storage_classes=None):
+ super(MagicDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.pdh_only = pdh_only
@@ -767,8 +809,14 @@ and the directory will appear if it exists.
# If we're the root directory, add an identical by_id subdirectory.
if self.inode == llfuse.ROOT_INODE:
self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- self.pdh_only))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ self.pdh_only,
+ ))
def __contains__(self, k):
if k in self._entries:
@@ -782,15 +830,34 @@ and the directory will appear if it exists.
if group_uuid_pattern.match(k):
project = self.api.groups().list(
- filters=[['group_class', 'in', ['project','filter']], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ ["uuid", "=", k],
+ *self._filters_for('groups', qualified=False),
+ ],
+ ).execute(num_retries=self.num_retries)
if project[u'items_available'] == 0:
return False
e = self.inodes.add_entry(ProjectDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- project[u'items'][0], storage_classes=self.storage_classes))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ project[u'items'][0],
+ storage_classes=self.storage_classes,
+ ))
else:
e = self.inodes.add_entry(CollectionDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ k,
+ ))
if e.update():
if k not in self._entries:
@@ -824,8 +891,8 @@ and the directory will appear if it exists.
class TagsDirectory(Directory):
"""A special directory that contains as subdirectories all tags visible to the user."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
- super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, poll_time=60):
+ super(TagsDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self._poll = True
@@ -839,15 +906,32 @@ class TagsDirectory(Directory):
def update(self):
with llfuse.lock_released:
tags = self.api.links().list(
- filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
- select=['name'], distinct=True, limit=1000
- ).execute(num_retries=self.num_retries)
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '!=', ''],
+ *self._filters_for('links', qualified=False),
+ ],
+ select=['name'],
+ distinct=True,
+ limit=1000,
+ ).execute(num_retries=self.num_retries)
if "items" in tags:
- self.merge(tags['items']+[{"name": n} for n in self._extra],
- lambda i: i['name'],
- lambda a, i: a.tag == i['name'],
- lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i['name'], poll=self._poll, poll_time=self._poll_time))
+ self.merge(
+ tags['items']+[{"name": n} for n in self._extra],
+ lambda i: i['name'],
+ lambda a, i: a.tag == i['name'],
+ lambda i: TagDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i['name'],
+ poll=self._poll,
+ poll_time=self._poll_time,
+ ),
+ )
@use_counter
@check_update
@@ -856,7 +940,12 @@ class TagsDirectory(Directory):
return super(TagsDirectory, self).__getitem__(item)
with llfuse.lock_released:
tags = self.api.links().list(
- filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '=', item],
+ *self._filters_for('links', qualified=False),
+ ],
+ limit=1,
).execute(num_retries=self.num_retries)
if tags["items"]:
self._extra.add(item)
@@ -881,9 +970,9 @@ class TagDirectory(Directory):
to the user that are tagged with a particular tag.
"""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, tag,
poll=False, poll_time=60):
- super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ super(TagDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.tag = tag
@@ -897,23 +986,40 @@ class TagDirectory(Directory):
def update(self):
with llfuse.lock_released:
taggedcollections = self.api.links().list(
- filters=[['link_class', '=', 'tag'],
- ['name', '=', self.tag],
- ['head_uuid', 'is_a', 'arvados#collection']],
- select=['head_uuid']
- ).execute(num_retries=self.num_retries)
- self.merge(taggedcollections['items'],
- lambda i: i['head_uuid'],
- lambda a, i: a.collection_locator == i['head_uuid'],
- lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '=', self.tag],
+ ['head_uuid', 'is_a', 'arvados#collection'],
+ *self._filters_for('links', qualified=False),
+ ],
+ select=['head_uuid'],
+ ).execute(num_retries=self.num_retries)
+ self.merge(
+ taggedcollections['items'],
+ lambda i: i['head_uuid'],
+ lambda a, i: a.collection_locator == i['head_uuid'],
+ lambda i: CollectionDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i['head_uuid'],
+ ),
+ )
class ProjectDirectory(Directory):
"""A special directory that contains the contents of a project."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
- poll=True, poll_time=3, storage_classes=None):
- super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ __slots__ = ("api", "num_retries", "project_object", "project_object_file",
+ "project_uuid", "_updating_lock",
+ "_current_user", "_full_listing", "storage_classes", "recursively_contained")
+
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+ project_object, poll=True, poll_time=3, storage_classes=None):
+ super(ProjectDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.project_object = project_object
@@ -925,19 +1031,32 @@ class ProjectDirectory(Directory):
self._current_user = None
self._full_listing = False
self.storage_classes = storage_classes
+ self.recursively_contained = False
+
+ # Filter groups can contain themselves, which causes tools
+ # that walk the filesystem to get stuck in an infinite loop,
+ # so suppress returning a listing in that case.
+ if self.project_object.get("group_class") == "filter":
+ iter_parent_inode = parent_inode
+ while iter_parent_inode != llfuse.ROOT_INODE:
+ parent_dir = self.inodes[iter_parent_inode]
+ if isinstance(parent_dir, ProjectDirectory) and parent_dir.project_uuid == self.project_uuid:
+ self.recursively_contained = True
+ break
+ iter_parent_inode = parent_dir.parent_inode
def want_event_subscribe(self):
return True
def createDirectory(self, i):
+ common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)
if collection_uuid_pattern.match(i['uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
+ return CollectionDirectory(*common_args, i)
elif group_uuid_pattern.match(i['uuid']):
- return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i, self._poll, self._poll_time, self.storage_classes)
+ return ProjectDirectory(*common_args, i, self._poll, self._poll_time, self.storage_classes)
elif link_uuid_pattern.match(i['uuid']):
if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
+ return CollectionDirectory(*common_args, i['head_uuid'])
else:
return None
elif uuid_pattern.match(i['uuid']):
@@ -975,7 +1094,7 @@ class ProjectDirectory(Directory):
self.project_object_file = ObjectFile(self.inode, self.project_object)
self.inodes.add_entry(self.project_object_file)
- if not self._full_listing:
+ if self.recursively_contained or not self._full_listing:
return True
def samefn(a, i):
@@ -998,20 +1117,27 @@ class ProjectDirectory(Directory):
self.project_object = self.api.users().get(
uuid=self.project_uuid).execute(num_retries=self.num_retries)
# do this in 2 steps until #17424 is fixed
- contents = list(arvados.util.keyset_list_all(self.api.groups().contents,
- order_key="uuid",
- num_retries=self.num_retries,
- uuid=self.project_uuid,
- filters=[["uuid", "is_a", "arvados#group"],
- ["groups.group_class", "in", ["project","filter"]]]))
- contents.extend(filter(lambda i: i["current_version_uuid"] == i["uuid"],
- arvados.util.keyset_list_all(self.api.groups().contents,
- order_key="uuid",
- num_retries=self.num_retries,
- uuid=self.project_uuid,
- filters=[["uuid", "is_a", "arvados#collection"]])))
-
-
+ contents = list(arvados.util.keyset_list_all(
+ self.api.groups().contents,
+ order_key='uuid',
+ num_retries=self.num_retries,
+ uuid=self.project_uuid,
+ filters=[
+ ['uuid', 'is_a', 'arvados#group'],
+ ['groups.group_class', 'in', ['project', 'filter']],
+ *self._filters_for('groups', qualified=True),
+ ],
+ ))
+ contents.extend(obj for obj in arvados.util.keyset_list_all(
+ self.api.groups().contents,
+ order_key='uuid',
+ num_retries=self.num_retries,
+ uuid=self.project_uuid,
+ filters=[
+ ['uuid', 'is_a', 'arvados#collection'],
+ *self._filters_for('collections', qualified=True),
+ ],
+ ) if obj['current_version_uuid'] == obj['uuid'])
# end with llfuse.lock_released, re-acquire lock
self.merge(contents,
@@ -1040,14 +1166,24 @@ class ProjectDirectory(Directory):
namefilter = ["name", "=", k]
else:
namefilter = ["name", "in", [k, k2]]
- contents = self.api.groups().list(filters=[["owner_uuid", "=", self.project_uuid],
- ["group_class", "in", ["project","filter"]],
- namefilter],
- limit=2).execute(num_retries=self.num_retries)["items"]
+ contents = self.api.groups().list(
+ filters=[
+ ["owner_uuid", "=", self.project_uuid],
+ ["group_class", "in", ["project","filter"]],
+ namefilter,
+ *self._filters_for('groups', qualified=False),
+ ],
+ limit=2,
+ ).execute(num_retries=self.num_retries)["items"]
if not contents:
- contents = self.api.collections().list(filters=[["owner_uuid", "=", self.project_uuid],
- namefilter],
- limit=2).execute(num_retries=self.num_retries)["items"]
+ contents = self.api.collections().list(
+ filters=[
+ ["owner_uuid", "=", self.project_uuid],
+ namefilter,
+ *self._filters_for('collections', qualified=False),
+ ],
+ limit=2,
+ ).execute(num_retries=self.num_retries)["items"]
if contents:
if len(contents) > 1 and contents[1]['name'] == k:
# If "foo/bar" and "foo[SUBST]bar" both exist, use
@@ -1084,6 +1220,12 @@ class ProjectDirectory(Directory):
def persisted(self):
return True
+ def clear(self):
+ super(ProjectDirectory, self).clear()
+ if self.project_object_file is not None:
+ self.inodes.del_entry(self.project_object_file)
+ self.project_object_file = None
+
@use_counter
@check_update
def mkdir(self, name):
@@ -1201,9 +1343,9 @@ class ProjectDirectory(Directory):
class SharedDirectory(Directory):
"""A special directory that represents users or groups who have shared projects with me."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
- poll=False, poll_time=60, storage_classes=None):
- super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+ exclude, poll=False, poll_time=60, storage_classes=None):
+ super(SharedDirectory, self).__init__(parent_inode, inodes, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1229,11 +1371,17 @@ class SharedDirectory(Directory):
if 'httpMethod' in methods.get('shared', {}):
page = []
while True:
- resp = self.api.groups().shared(filters=[['group_class', 'in', ['project','filter']]]+page,
- order="uuid",
- limit=10000,
- count="none",
- include="owner_uuid").execute()
+ resp = self.api.groups().shared(
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ *page,
+ *self._filters_for('groups', qualified=False),
+ ],
+ order="uuid",
+ limit=10000,
+ count="none",
+ include="owner_uuid",
+ ).execute()
if not resp["items"]:
break
page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
@@ -1248,8 +1396,12 @@ class SharedDirectory(Directory):
self.api.groups().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['group_class','in',['project','filter']]],
- select=["uuid", "owner_uuid"]))
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ *self._filters_for('groups', qualified=False),
+ ],
+ select=["uuid", "owner_uuid"],
+ ))
for ob in all_projects:
objects[ob['uuid']] = ob
@@ -1263,13 +1415,20 @@ class SharedDirectory(Directory):
self.api.users().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['uuid','in', list(root_owners)]])
+ filters=[
+ ['uuid', 'in', list(root_owners)],
+ *self._filters_for('users', qualified=False),
+ ],
+ )
lgroups = arvados.util.keyset_list_all(
self.api.groups().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['uuid','in', list(root_owners)+roots]])
-
+ filters=[
+ ['uuid', 'in', list(root_owners)+roots],
+ *self._filters_for('groups', qualified=False),
+ ],
+ )
for l in lusers:
objects[l["uuid"]] = l
for l in lgroups:
@@ -1291,11 +1450,23 @@ class SharedDirectory(Directory):
# end with llfuse.lock_released, re-acquire lock
- self.merge(contents.items(),
- lambda i: i[0],
- lambda a, i: a.uuid() == i[1]['uuid'],
- lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+ self.merge(
+ contents.items(),
+ lambda i: i[0],
+ lambda a, i: a.uuid() == i[1]['uuid'],
+ lambda i: ProjectDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i[1],
+ poll=self._poll,
+ poll_time=self._poll_time,
+ storage_classes=self.storage_classes,
+ ),
+ )
except Exception:
_logger.exception("arv-mount shared dir error")
finally:
diff --git a/services/fuse/arvados_fuse/fusefile.py b/services/fuse/arvados_fuse/fusefile.py
index 45d3db16fe..9279f7d99d 100644
--- a/services/fuse/arvados_fuse/fusefile.py
+++ b/services/fuse/arvados_fuse/fusefile.py
@@ -80,9 +80,17 @@ class FuseArvadosFile(File):
if self.writable():
self.arvfile.parent.root_collection().save()
+ def clear(self):
+ if self.parent_inode is None:
+ self.arvfile.fuse_entry = None
+ self.arvfile = None
+
class StringFile(File):
"""Wrap a simple string as a file"""
+
+ __slots__ = ("contents",)
+
def __init__(self, parent_inode, contents, _mtime):
super(StringFile, self).__init__(parent_inode, _mtime)
self.contents = contents
@@ -97,6 +105,8 @@ class StringFile(File):
class ObjectFile(StringFile):
"""Wrap a dict as a serialized json object."""
+ __slots__ = ("object_uuid",)
+
def __init__(self, parent_inode, obj):
super(ObjectFile, self).__init__(parent_inode, "", 0)
self.object_uuid = obj['uuid']
@@ -125,6 +135,9 @@ class FuncToJSONFile(StringFile):
The function is called at the time the file is read. The result is
cached until invalidate() is called.
"""
+
+ __slots__ = ("func",)
+
def __init__(self, parent_inode, func):
super(FuncToJSONFile, self).__init__(parent_inode, "", 0)
self.func = func
diff --git a/services/fuse/arvados_fuse/unmount.py b/services/fuse/arvados_fuse/unmount.py
index 12d047a8f3..144c582ddc 100644
--- a/services/fuse/arvados_fuse/unmount.py
+++ b/services/fuse/arvados_fuse/unmount.py
@@ -154,6 +154,16 @@ def unmount(path, subtype=None, timeout=10, recursive=False):
path = os.path.realpath(path)
continue
elif not mounted:
+ if was_mounted:
+ # This appears to avoid a race condition where we
+ # return control to the caller after running
+ # "fusermount -u -z" (see below), the caller (e.g.,
+ # arv-mount --replace) immediately tries to attach a
+ # new fuse mount at the same mount point, the
+ # lazy-unmount process unmounts that _new_ mount while
+ # it is being initialized, and the setup code waits
+ # forever for the new mount to be initialized.
+ time.sleep(1)
return was_mounted
if attempted:
diff --git a/services/fuse/arvados_version.py b/services/fuse/arvados_version.py
index d8eec3d9ee..794b6afe42 100644
--- a/services/fuse/arvados_version.py
+++ b/services/fuse/arvados_version.py
@@ -1,58 +1,145 @@
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+#
+# This file runs in one of three modes:
+#
+# 1. If the ARVADOS_BUILDING_VERSION environment variable is set, it writes
+# _version.py and generates dependencies based on that value.
+# 2. If running from an arvados Git checkout, it writes _version.py
+# and generates dependencies from Git.
+# 3. Otherwise, we expect this is source previously generated from Git, and
+# it reads _version.py and generates dependencies from it.
-import subprocess
-import time
import os
import re
+import runpy
+import subprocess
import sys
-SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
-VERSION_PATHS = {
- SETUP_DIR,
- os.path.abspath(os.path.join(SETUP_DIR, "../../sdk/python")),
- os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
- }
+from pathlib import Path
+
+# These maps explain the relationships between different Python modules in
+# the arvados repository. We use these to help generate setup.py.
+PACKAGE_DEPENDENCY_MAP = {
+ 'arvados-cwl-runner': ['arvados-python-client', 'crunchstat_summary'],
+ 'arvados-user-activity': ['arvados-python-client'],
+ 'arvados_fuse': ['arvados-python-client'],
+ 'crunchstat_summary': ['arvados-python-client'],
+}
+PACKAGE_MODULE_MAP = {
+ 'arvados-cwl-runner': 'arvados_cwl',
+ 'arvados-docker-cleaner': 'arvados_docker',
+ 'arvados-python-client': 'arvados',
+ 'arvados-user-activity': 'arvados_user_activity',
+ 'arvados_fuse': 'arvados_fuse',
+ 'crunchstat_summary': 'crunchstat_summary',
+}
+PACKAGE_SRCPATH_MAP = {
+ 'arvados-cwl-runner': Path('sdk', 'cwl'),
+ 'arvados-docker-cleaner': Path('services', 'dockercleaner'),
+ 'arvados-python-client': Path('sdk', 'python'),
+ 'arvados-user-activity': Path('tools', 'user-activity'),
+ 'arvados_fuse': Path('services', 'fuse'),
+ 'crunchstat_summary': Path('tools', 'crunchstat-summary'),
+}
+
+ENV_VERSION = os.environ.get("ARVADOS_BUILDING_VERSION")
+SETUP_DIR = Path(__file__).absolute().parent
+try:
+ REPO_PATH = Path(subprocess.check_output(
+ ['git', '-C', str(SETUP_DIR), 'rev-parse', '--show-toplevel'],
+ stderr=subprocess.DEVNULL,
+ text=True,
+ ).rstrip('\n'))
+except (subprocess.CalledProcessError, OSError):
+ REPO_PATH = None
+else:
+ # Verify this is the arvados monorepo
+ if all((REPO_PATH / path).exists() for path in PACKAGE_SRCPATH_MAP.values()):
+ PACKAGE_NAME, = (
+ pkg_name for pkg_name, path in PACKAGE_SRCPATH_MAP.items()
+ if (REPO_PATH / path) == SETUP_DIR
+ )
+ MODULE_NAME = PACKAGE_MODULE_MAP[PACKAGE_NAME]
+ VERSION_SCRIPT_PATH = Path(REPO_PATH, 'build', 'version-at-commit.sh')
+ else:
+ REPO_PATH = None
+if REPO_PATH is None:
+ (PACKAGE_NAME, MODULE_NAME), = (
+ (pkg_name, mod_name)
+ for pkg_name, mod_name in PACKAGE_MODULE_MAP.items()
+ if (SETUP_DIR / mod_name).is_dir()
+ )
+
+def short_tests_only(arglist=sys.argv):
+ try:
+ arglist.remove('--short-tests-only')
+ except ValueError:
+ return False
+ else:
+ return True
+
+def git_log_output(path, *args):
+ return subprocess.check_output(
+ ['git', '-C', str(REPO_PATH),
+ 'log', '--first-parent', '--max-count=1',
+ *args, str(path)],
+ text=True,
+ ).rstrip('\n')
def choose_version_from():
- ts = {}
- for path in VERSION_PATHS:
- ts[subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', path]).strip()] = path
-
- sorted_ts = sorted(ts.items())
- getver = sorted_ts[-1][1]
- print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ ver_paths = [SETUP_DIR, VERSION_SCRIPT_PATH, *(
+ PACKAGE_SRCPATH_MAP[pkg]
+ for pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ())
+ )]
+ getver = max(ver_paths, key=lambda path: git_log_output(path, '--format=format:%ct'))
+ print(f"Using {getver} for version number calculation of {SETUP_DIR}", file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
- myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
- '--format=%H', curdir]).strip()
- myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
- return myversion
+ myhash = git_log_output(curdir, '--format=%H')
+ return subprocess.check_output(
+ [str(VERSION_SCRIPT_PATH), myhash],
+ text=True,
+ ).rstrip('\n')
def save_version(setup_dir, module, v):
- v = v.replace("~dev", ".dev").replace("~rc", "rc")
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ with Path(setup_dir, module, '_version.py').open('w') as fp:
+ print(f"__version__ = {v!r}", file=fp)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
-
-def get_version(setup_dir, module):
- env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+ file_vars = runpy.run_path(Path(setup_dir, module, '_version.py'))
+ return file_vars['__version__']
- if env_version:
- save_version(setup_dir, module, env_version)
+def get_version(setup_dir=SETUP_DIR, module=MODULE_NAME):
+ if ENV_VERSION:
+ version = ENV_VERSION
+ elif REPO_PATH is None:
+ return read_version(setup_dir, module)
else:
- try:
- save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError) as err:
- print("ERROR: {0}".format(err), file=sys.stderr)
- pass
+ version = git_version_at_commit()
+ version = version.replace("~dev", ".dev").replace("~rc", "rc")
+ save_version(setup_dir, module, version)
+ return version
+
+def iter_dependencies(version=None):
+ if version is None:
+ version = get_version()
+ # A packaged development release should be installed with other
+ # development packages built from the same source, but those
+ # dependencies may have earlier "dev" versions (read: less recent
+ # Git commit timestamps). This compatible version dependency
+ # expresses that as closely as possible. Allowing versions
+ # compatible with .dev0 allows any development release.
+ # Regular expression borrowed partially from
+ # <https://peps.python.org/pep-0440/#version-matching>
+ dep_ver, match_count = re.subn(r'\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)
+ dep_op = '~=' if match_count else '=='
+ for dep_pkg in PACKAGE_DEPENDENCY_MAP.get(PACKAGE_NAME, ()):
+ yield f'{dep_pkg}{dep_op}{dep_ver}'
- return read_version(setup_dir, module)
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+ print(get_version())
diff --git a/services/fuse/fpm-info.sh b/services/fuse/fpm-info.sh
index f789abe692..4d98172f8d 100644
--- a/services/fuse/fpm-info.sh
+++ b/services/fuse/fpm-info.sh
@@ -5,7 +5,7 @@
fpm_depends+=(fuse)
case "$TARGET" in
- centos*)
+ centos*|rocky*)
fpm_depends+=(fuse-libs)
;;
debian* | ubuntu*)
diff --git a/services/fuse/setup.py b/services/fuse/setup.py
index 545b4bfa01..5a77174c62 100644
--- a/services/fuse/setup.py
+++ b/services/fuse/setup.py
@@ -10,21 +10,10 @@ import re
from setuptools import setup, find_packages
-SETUP_DIR = os.path.dirname(__file__) or '.'
-README = os.path.join(SETUP_DIR, 'README.rst')
-
import arvados_version
-version = arvados_version.get_version(SETUP_DIR, "arvados_fuse")
-if os.environ.get('ARVADOS_BUILDING_VERSION', False):
- pysdk_dep = "=={}".format(version)
-else:
- # On dev releases, arvados-python-client may have a different timestamp
- pysdk_dep = "<={}".format(version)
-
-short_tests_only = False
-if '--short-tests-only' in sys.argv:
- short_tests_only = True
- sys.argv.remove('--short-tests-only')
+version = arvados_version.get_version()
+short_tests_only = arvados_version.short_tests_only()
+README = os.path.join(arvados_version.SETUP_DIR, 'README.rst')
setup(name='arvados_fuse',
version=version,
@@ -43,22 +32,19 @@ setup(name='arvados_fuse',
('share/doc/arvados_fuse', ['agpl-3.0.txt', 'README.rst']),
],
install_requires=[
- 'arvados-python-client{}'.format(pysdk_dep),
- 'llfuse >= 1.3.6',
+ *arvados_version.iter_dependencies(version),
+ 'arvados-llfuse >= 1.5.1',
'future',
'python-daemon',
'ciso8601 >= 2.0.0',
'setuptools',
"prometheus_client"
],
- extras_require={
- ':python_version<"3"': ['pytz'],
- },
+ python_requires="~=3.8",
classifiers=[
- 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
test_suite='tests',
- tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
+ tests_require=['PyYAML', 'parameterized',],
zip_safe=False
)
diff --git a/services/fuse/tests/integration_test.py b/services/fuse/tests/integration_test.py
index 89b39dbc87..e80b6983a1 100644
--- a/services/fuse/tests/integration_test.py
+++ b/services/fuse/tests/integration_test.py
@@ -86,7 +86,7 @@ class IntegrationTest(unittest.TestCase):
with arvados_fuse.command.Mount(
arvados_fuse.command.ArgumentParser().parse_args(
argv + ['--foreground',
- '--unmount-timeout=2',
+ '--unmount-timeout=60',
self.mnt])) as self.mount:
return func(self, *args, **kwargs)
finally:
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
index 7cf8aa373a..02f4009724 100644
--- a/services/fuse/tests/mount_test_base.py
+++ b/services/fuse/tests/mount_test_base.py
@@ -4,6 +4,7 @@
from __future__ import absolute_import
import arvados
+import arvados.keep
import arvados_fuse as fuse
import arvados.safeapi
import llfuse
@@ -24,7 +25,16 @@ logger = logging.getLogger('arvados.arv-mount')
from .integration_test import workerPool
+def make_block_cache(disk_cache):
+ if disk_cache:
+ disk_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "arvados", "keep")
+ shutil.rmtree(disk_cache_dir, ignore_errors=True)
+ block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache)
+ return block_cache
+
class MountTestBase(unittest.TestCase):
+ disk_cache = False
+
def setUp(self, api=None, local_store=True):
# The underlying C implementation of open() makes a fstat() syscall
# with the GIL still held. When the GETATTR message comes back to
@@ -43,7 +53,12 @@ class MountTestBase(unittest.TestCase):
self.mounttmp = tempfile.mkdtemp()
run_test_server.run()
run_test_server.authorize_with("admin")
- self.api = api if api else arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
+
+ self.api = api if api else arvados.safeapi.ThreadSafeApiCache(
+ arvados.config.settings(),
+ keep_params={"block_cache": make_block_cache(self.disk_cache)},
+ version='v1',
+ )
self.llfuse_thread = None
# This is a copy of Mount's method. TODO: Refactor MountTestBase
@@ -57,15 +72,22 @@ class MountTestBase(unittest.TestCase):
llfuse.close()
def make_mount(self, root_class, **root_kwargs):
- enable_write = True
- if 'enable_write' in root_kwargs:
- enable_write = root_kwargs.pop('enable_write')
+ enable_write = root_kwargs.pop('enable_write', True)
self.operations = fuse.Operations(
- os.getuid(), os.getgid(),
+ os.getuid(),
+ os.getgid(),
api_client=self.api,
- enable_write=enable_write)
+ enable_write=enable_write,
+ )
self.operations.inodes.add_entry(root_class(
- llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.api,
+ 0,
+ enable_write,
+ root_kwargs.pop('filters', None),
+ **root_kwargs,
+ ))
llfuse.init(self.operations, self.mounttmp, [])
self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
self.llfuse_thread.daemon = True
@@ -80,10 +102,10 @@ class MountTestBase(unittest.TestCase):
self.operations.events.close(timeout=10)
subprocess.call(["fusermount", "-u", "-z", self.mounttmp])
t0 = time.time()
- self.llfuse_thread.join(timeout=10)
+ self.llfuse_thread.join(timeout=60)
if self.llfuse_thread.is_alive():
logger.warning("MountTestBase.tearDown():"
- " llfuse thread still alive 10s after umount"
+ " llfuse thread still alive 60s after umount"
" -- exiting with SIGKILL")
os.kill(os.getpid(), signal.SIGKILL)
waited = time.time() - t0
diff --git a/services/fuse/tests/test_command_args.py b/services/fuse/tests/test_command_args.py
index ed59029628..90153d22d1 100644
--- a/services/fuse/tests/test_command_args.py
+++ b/services/fuse/tests/test_command_args.py
@@ -14,12 +14,14 @@ import io
import json
import llfuse
import logging
-import mock
import os
from . import run_test_server
import sys
import tempfile
import unittest
+import resource
+
+from unittest import mock
def noexit(func):
"""If argparse or arvados_fuse tries to exit, fail the test instead"""
@@ -261,6 +263,50 @@ class MountArgsTest(unittest.TestCase):
'--foreground', self.mntdir])
arvados_fuse.command.Mount(args)
+ @noexit
+ @mock.patch('resource.setrlimit')
+ @mock.patch('resource.getrlimit')
+ def test_default_file_cache(self, getrlimit, setrlimit):
+ args = arvados_fuse.command.ArgumentParser().parse_args([
+ '--foreground', self.mntdir])
+ self.assertEqual(args.mode, None)
+ getrlimit.return_value = (1024, 1048576)
+ self.mnt = arvados_fuse.command.Mount(args)
+ setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (10240, 1048576))
+
+ @noexit
+ @mock.patch('resource.setrlimit')
+ @mock.patch('resource.getrlimit')
+ def test_small_file_cache(self, getrlimit, setrlimit):
+ args = arvados_fuse.command.ArgumentParser().parse_args([
+ '--foreground', '--file-cache=256000000', self.mntdir])
+ self.assertEqual(args.mode, None)
+ getrlimit.return_value = (1024, 1048576)
+ self.mnt = arvados_fuse.command.Mount(args)
+ setrlimit.assert_not_called()
+
+ @noexit
+ @mock.patch('resource.setrlimit')
+ @mock.patch('resource.getrlimit')
+ def test_large_file_cache(self, getrlimit, setrlimit):
+ args = arvados_fuse.command.ArgumentParser().parse_args([
+ '--foreground', '--file-cache=256000000000', self.mntdir])
+ self.assertEqual(args.mode, None)
+ getrlimit.return_value = (1024, 1048576)
+ self.mnt = arvados_fuse.command.Mount(args)
+ setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (30517, 1048576))
+
+ @noexit
+ @mock.patch('resource.setrlimit')
+ @mock.patch('resource.getrlimit')
+ def test_file_cache_hard_limit(self, getrlimit, setrlimit):
+ args = arvados_fuse.command.ArgumentParser().parse_args([
+ '--foreground', '--file-cache=256000000000', self.mntdir])
+ self.assertEqual(args.mode, None)
+ getrlimit.return_value = (1024, 2048)
+ self.mnt = arvados_fuse.command.Mount(args)
+ setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (2048, 2048))
+
class MountErrorTest(unittest.TestCase):
def setUp(self):
self.mntdir = tempfile.mkdtemp()
@@ -292,7 +338,7 @@ class MountErrorTest(unittest.TestCase):
def test_bogus_host(self):
arvados.config._settings["ARVADOS_API_HOST"] = "100::"
- with self.assertRaises(SystemExit) as ex:
+ with self.assertRaises(SystemExit) as ex, mock.patch('time.sleep'):
args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
arvados_fuse.command.Mount(args, logger=self.logger).run()
self.assertEqual(1, ex.exception.code)
diff --git a/services/fuse/tests/test_exec.py b/services/fuse/tests/test_exec.py
index 6af60302bc..f977990026 100644
--- a/services/fuse/tests/test_exec.py
+++ b/services/fuse/tests/test_exec.py
@@ -9,16 +9,12 @@ import json
import multiprocessing
import os
from . import run_test_server
+import shlex
import tempfile
import unittest
from .integration_test import workerPool
-try:
- from shlex import quote
-except:
- from pipes import quote
-
def try_exec(mnt, cmd):
try:
os.environ['KEEP_LOCAL_STORE'] = tempfile.mkdtemp()
@@ -56,11 +52,11 @@ class ExecMode(unittest.TestCase):
def test_exec(self):
workerPool().apply(try_exec, (self.mnt, [
- 'sh', '-c',
- 'echo -n foo >{}; cp {} {}'.format(
- quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),
- quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),
- quote(os.path.join(self.okfile)))]))
+ 'sh', '-c', 'echo -n foo >{}; cp {} {}'.format(
+ shlex.quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),
+ shlex.quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),
+ shlex.quote(os.path.join(self.okfile)),
+ )]))
with open(self.okfile) as f:
assertRegex(
self,
diff --git a/services/fuse/tests/test_inodes.py b/services/fuse/tests/test_inodes.py
index 07e6036d08..cc22f521e0 100644
--- a/services/fuse/tests/test_inodes.py
+++ b/services/fuse/tests/test_inodes.py
@@ -3,15 +3,21 @@
# SPDX-License-Identifier: AGPL-3.0
import arvados_fuse
-import mock
import unittest
import llfuse
import logging
+from unittest import mock
+
class InodeTests(unittest.TestCase):
+
+ # The following tests call next(inodes._counter) because inode 1
+ # (the root directory) gets special treatment.
+
def test_inodes_basic(self):
cache = arvados_fuse.InodeCache(1000, 4)
inodes = arvados_fuse.Inodes(cache)
+ next(inodes._counter)
# Check that ent1 gets added to inodes
ent1 = mock.MagicMock()
@@ -27,6 +33,7 @@ class InodeTests(unittest.TestCase):
def test_inodes_not_persisted(self):
cache = arvados_fuse.InodeCache(1000, 4)
inodes = arvados_fuse.Inodes(cache)
+ next(inodes._counter)
ent1 = mock.MagicMock()
ent1.in_use.return_value = False
@@ -48,6 +55,7 @@ class InodeTests(unittest.TestCase):
def test_inode_cleared(self):
cache = arvados_fuse.InodeCache(1000, 4)
inodes = arvados_fuse.Inodes(cache)
+ next(inodes._counter)
# Check that ent1 gets added to inodes
ent1 = mock.MagicMock()
@@ -68,25 +76,31 @@ class InodeTests(unittest.TestCase):
inodes.add_entry(ent3)
# Won't clear anything because min_entries = 4
- self.assertEqual(2, len(cache._entries))
+ self.assertEqual(2, len(cache._cache_entries))
self.assertFalse(ent1.clear.called)
self.assertEqual(1100, cache.total())
# Change min_entries
cache.min_entries = 1
- cache.cap_cache()
+ ent1.parent_inode = None
+ inodes.cap_cache()
+ inodes.wait_remove_queue_empty()
self.assertEqual(600, cache.total())
self.assertTrue(ent1.clear.called)
# Touching ent1 should cause ent3 to get cleared
+ ent3.parent_inode = None
self.assertFalse(ent3.clear.called)
- cache.touch(ent1)
+ inodes.inode_cache.update_cache_size(ent1)
+ inodes.touch(ent1)
+ inodes.wait_remove_queue_empty()
self.assertTrue(ent3.clear.called)
self.assertEqual(500, cache.total())
def test_clear_in_use(self):
cache = arvados_fuse.InodeCache(1000, 4)
inodes = arvados_fuse.Inodes(cache)
+ next(inodes._counter)
ent1 = mock.MagicMock()
ent1.in_use.return_value = True
@@ -109,10 +123,12 @@ class InodeTests(unittest.TestCase):
ent3.clear.called = False
self.assertFalse(ent1.clear.called)
self.assertFalse(ent3.clear.called)
- cache.touch(ent3)
+ inodes.touch(ent3)
+ inodes.wait_remove_queue_empty()
self.assertFalse(ent1.clear.called)
self.assertFalse(ent3.clear.called)
- self.assertFalse(ent3.kernel_invalidate.called)
+ # kernel invalidate gets called anyway
+ self.assertTrue(ent3.kernel_invalidate.called)
self.assertEqual(1100, cache.total())
# ent1 still in use, ent3 doesn't have ref,
@@ -120,14 +136,17 @@ class InodeTests(unittest.TestCase):
ent3.has_ref.return_value = False
ent1.clear.called = False
ent3.clear.called = False
- cache.touch(ent3)
+ ent3.parent_inode = None
+ inodes.touch(ent3)
+ inodes.wait_remove_queue_empty()
self.assertFalse(ent1.clear.called)
self.assertTrue(ent3.clear.called)
self.assertEqual(500, cache.total())
def test_delete(self):
- cache = arvados_fuse.InodeCache(1000, 4)
+ cache = arvados_fuse.InodeCache(1000, 0)
inodes = arvados_fuse.Inodes(cache)
+ next(inodes._counter)
ent1 = mock.MagicMock()
ent1.in_use.return_value = False
@@ -147,6 +166,9 @@ class InodeTests(unittest.TestCase):
ent1.ref_count = 0
with llfuse.lock:
inodes.del_entry(ent1)
+ inodes.wait_remove_queue_empty()
self.assertEqual(0, cache.total())
- cache.touch(ent3)
+
+ inodes.add_entry(ent3)
+ inodes.wait_remove_queue_empty()
self.assertEqual(600, cache.total())
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
index 1601db5944..2d775c0608 100644
--- a/services/fuse/tests/test_mount.py
+++ b/services/fuse/tests/test_mount.py
@@ -6,19 +6,24 @@ from __future__ import absolute_import
from future.utils import viewitems
from builtins import str
from builtins import object
+from pathlib import Path
from six import assertRegex
+import errno
import json
import llfuse
import logging
-import mock
import os
import subprocess
import time
import unittest
import tempfile
+import parameterized
+
+from unittest import mock
import arvados
import arvados_fuse as fuse
+from arvados_fuse import fusedir
from . import run_test_server
from .integration_test import IntegrationTest
@@ -54,7 +59,7 @@ class AssertWithTimeout(object):
else:
self.done = True
-
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseMountTest(MountTestBase):
def setUp(self):
super(FuseMountTest, self).setUp()
@@ -125,6 +130,7 @@ class FuseMountTest(MountTestBase):
self.assertEqual(v, f.read().decode())
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseMagicTest(MountTestBase):
def setUp(self, api=None):
super(FuseMagicTest, self).setUp(api=api)
@@ -283,6 +289,7 @@ def fuseSharedTestHelper(mounttmp):
Test().runTest()
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseSharedTest(MountTestBase):
def runTest(self):
self.make_mount(fuse.SharedDirectory,
@@ -343,6 +350,7 @@ def fuseModifyFileTestHelperReadEndContents(mounttmp):
self.assertEqual("plnp", f.read())
Test().runTest()
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseModifyFileTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -363,6 +371,7 @@ class FuseModifyFileTest(MountTestBase):
self.pool.apply(fuseModifyFileTestHelperReadEndContents, (self.mounttmp,))
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseAddFileToCollectionTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -385,6 +394,7 @@ class FuseAddFileToCollectionTest(MountTestBase):
self.assertEqual(["file1.txt", "file2.txt"], sorted(d1))
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseRemoveFileFromCollectionTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -416,6 +426,7 @@ def fuseCreateFileTestHelper(mounttmp):
pass
Test().runTest()
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseCreateFileTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -459,6 +470,7 @@ def fuseWriteFileTestHelperReadFile(mounttmp):
self.assertEqual(f.read(), "Hello world!")
Test().runTest()
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseWriteFileTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -507,6 +519,7 @@ def fuseUpdateFileTestHelper(mounttmp):
Test().runTest()
+@parameterized.parameterized_class([{"disk_cache": True}, {"disk_cache": False}])
class FuseUpdateFileTest(MountTestBase):
def runTest(self):
collection = arvados.collection.Collection(api_client=self.api)
@@ -1114,7 +1127,10 @@ class MagicDirApiError(FuseMagicTest):
class SanitizeFilenameTest(MountTestBase):
def test_sanitize_filename(self):
- pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
+ pdir = fuse.ProjectDirectory(
+ 1, fuse.Inodes(None), self.api, 0, False, None,
+ project_object=self.api.users().current().execute(),
+ )
acceptable = [
"foo.txt",
".foo",
@@ -1212,20 +1228,22 @@ class SlashSubstitutionTest(IntegrationTest):
mnt_args = [
'--read-write',
'--mount-home', 'zzz',
+ '--fsns', '[SLASH]'
]
def setUp(self):
super(SlashSubstitutionTest, self).setUp()
- self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
- self.api.config = lambda: {"Collections": {"ForwardSlashNameSubstitution": "[SLASH]"}}
+
+ self.api = arvados.safeapi.ThreadSafeApiCache(
+ arvados.config.settings(),
+ version='v1'
+ )
self.testcoll = self.api.collections().create(body={"name": "foo/bar/baz"}).execute()
self.testcolleasy = self.api.collections().create(body={"name": "foo-bar-baz"}).execute()
self.fusename = 'foo[SLASH]bar[SLASH]baz'
@IntegrationTest.mount(argv=mnt_args)
- @mock.patch('arvados.util.get_config_once')
- def test_slash_substitution_before_listing(self, get_config_once):
- get_config_once.return_value = {"Collections": {"ForwardSlashNameSubstitution": "[SLASH]"}}
+ def test_slash_substitution_before_listing(self):
self.pool_test(os.path.join(self.mnt, 'zzz'), self.fusename)
self.checkContents()
@staticmethod
@@ -1275,7 +1293,10 @@ class StorageClassesTest(IntegrationTest):
def setUp(self):
super(StorageClassesTest, self).setUp()
- self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
+ self.api = arvados.safeapi.ThreadSafeApiCache(
+ arvados.config.settings(),
+ version='v1',
+ )
@IntegrationTest.mount(argv=mnt_args)
def test_collection_default_storage_classes(self):
@@ -1312,7 +1333,110 @@ class ReadonlyCollectionTest(MountTestBase):
def runTest(self):
settings = arvados.config.settings().copy()
settings["ARVADOS_API_TOKEN"] = run_test_server.fixture("api_client_authorizations")["project_viewer"]["api_token"]
- self.api = arvados.safeapi.ThreadSafeApiCache(settings)
+ self.api = arvados.safeapi.ThreadSafeApiCache(settings, version='v1')
self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)
self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))
+
+
+@parameterized.parameterized_class([
+ {'root_class': fusedir.ProjectDirectory, 'root_kwargs': {
+ 'project_object': run_test_server.fixture('users')['admin'],
+ }},
+ {'root_class': fusedir.ProjectDirectory, 'root_kwargs': {
+ 'project_object': run_test_server.fixture('groups')['public'],
+ }},
+])
+class UnsupportedCreateTest(MountTestBase):
+ root_class = None
+ root_kwargs = {}
+
+ def setUp(self):
+ super().setUp()
+ if 'prefs' in self.root_kwargs.get('project_object', ()):
+ self.root_kwargs['project_object']['prefs'] = {}
+ self.make_mount(self.root_class, **self.root_kwargs)
+ # Make sure the directory knows about its top-level ents.
+ os.listdir(self.mounttmp)
+
+ def test_create(self):
+ test_path = Path(self.mounttmp, 'test_create')
+ with self.assertRaises(OSError) as exc_check:
+ with test_path.open('w'):
+ pass
+ self.assertEqual(exc_check.exception.errno, errno.ENOTSUP)
+
+
+# FIXME: IMO, for consistency with the "create inside a project" case,
+# these operations should also return ENOTSUP instead of EPERM.
+# Right now they're returning EPERM because the classes' writable() method
+# usually returns False, and the Operations class transforms that accordingly.
+# However, for cases where the mount will never be writable, I think ENOTSUP
+# is a clearer error: it lets the user know they can't fix the problem by
+# adding permissions in Arvados, etc.
+@parameterized.parameterized_class([
+ {'root_class': fusedir.MagicDirectory,
+ 'preset_dir': 'by_id',
+ 'preset_file': 'README',
+ },
+
+ {'root_class': fusedir.SharedDirectory,
+ 'root_kwargs': {
+ 'exclude': run_test_server.fixture('users')['admin']['uuid'],
+ },
+ 'preset_dir': 'Active User',
+ },
+
+ {'root_class': fusedir.TagDirectory,
+ 'root_kwargs': {
+ 'tag': run_test_server.fixture('links')['foo_collection_tag']['name'],
+ },
+ 'preset_dir': run_test_server.fixture('collections')['foo_collection_in_aproject']['uuid'],
+ },
+
+ {'root_class': fusedir.TagsDirectory,
+ 'preset_dir': run_test_server.fixture('links')['foo_collection_tag']['name'],
+ },
+])
+class UnsupportedOperationsTest(UnsupportedCreateTest):
+ preset_dir = None
+ preset_file = None
+
+ def test_create(self):
+ test_path = Path(self.mounttmp, 'test_create')
+ with self.assertRaises(OSError) as exc_check:
+ with test_path.open('w'):
+ pass
+ self.assertEqual(exc_check.exception.errno, errno.EPERM)
+
+ def test_mkdir(self):
+ test_path = Path(self.mounttmp, 'test_mkdir')
+ with self.assertRaises(OSError) as exc_check:
+ test_path.mkdir()
+ self.assertEqual(exc_check.exception.errno, errno.EPERM)
+
+ def test_rename(self):
+ src_name = self.preset_dir or self.preset_file
+ if src_name is None:
+ return
+ test_src = Path(self.mounttmp, src_name)
+ test_dst = test_src.with_name('test_dst')
+ with self.assertRaises(OSError) as exc_check:
+ test_src.rename(test_dst)
+ self.assertEqual(exc_check.exception.errno, errno.EPERM)
+
+ def test_rmdir(self):
+ if self.preset_dir is None:
+ return
+ test_path = Path(self.mounttmp, self.preset_dir)
+ with self.assertRaises(OSError) as exc_check:
+ test_path.rmdir()
+ self.assertEqual(exc_check.exception.errno, errno.EPERM)
+
+ def test_unlink(self):
+ if self.preset_file is None:
+ return
+ test_path = Path(self.mounttmp, self.preset_file)
+ with self.assertRaises(OSError) as exc_check:
+ test_path.unlink()
+ self.assertEqual(exc_check.exception.errno, errno.EPERM)
diff --git a/services/fuse/tests/test_mount_filters.py b/services/fuse/tests/test_mount_filters.py
new file mode 100644
index 0000000000..5f324537fb
--- /dev/null
+++ b/services/fuse/tests/test_mount_filters.py
@@ -0,0 +1,223 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import collections
+import itertools
+import json
+import re
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+from arvados_fuse import fusedir
+
+from .integration_test import IntegrationTest
+from .mount_test_base import MountTestBase
+from .run_test_server import fixture
+
+_COLLECTIONS = fixture('collections')
+_GROUPS = fixture('groups')
+_LINKS = fixture('links')
+_USERS = fixture('users')
+
+class DirectoryFiltersTestCase(MountTestBase):
+ DEFAULT_ROOT_KWARGS = {
+ 'enable_write': False,
+ 'filters': [
+ ['collections.name', 'like', 'zzzzz-4zz18-%'],
+ # This matches both "A Project" (which we use as the test root)
+ # and "A Subproject" (which we assert is found under it).
+ ['groups.name', 'like', 'A %roject'],
+ ],
+ }
+ EXPECTED_PATHS = frozenset([
+ _COLLECTIONS['foo_collection_in_aproject']['name'],
+ _GROUPS['asubproject']['name'],
+ ])
+ CHECKED_PATHS = EXPECTED_PATHS.union([
+ _COLLECTIONS['collection_to_move_around_in_aproject']['name'],
+ _GROUPS['subproject_in_active_user_home_project_to_test_unique_key_violation']['name'],
+ ])
+
+ @parameterized.expand([
+ (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+ (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+ (fusedir.SharedDirectory, {'exclude': None}, Path(
+ '{first_name} {last_name}'.format_map(_USERS['active']),
+ _GROUPS['aproject']['name'],
+ )),
+ ])
+ def test_filtered_path_exists(self, root_class, root_kwargs, subdir):
+ root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+ self.make_mount(root_class, **root_kwargs)
+ dir_path = Path(self.mounttmp, subdir)
+ actual = frozenset(
+ basename
+ for basename in self.CHECKED_PATHS
+ if (dir_path / basename).exists()
+ )
+ self.assertEqual(
+ actual,
+ self.EXPECTED_PATHS,
+ "mount existence checks did not match expected results",
+ )
+
+ @parameterized.expand([
+ (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+ (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+ (fusedir.SharedDirectory, {'exclude': None}, Path(
+ '{first_name} {last_name}'.format_map(_USERS['active']),
+ _GROUPS['aproject']['name'],
+ )),
+ ])
+ def test_filtered_path_listing(self, root_class, root_kwargs, subdir):
+ root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+ self.make_mount(root_class, **root_kwargs)
+ actual = frozenset(path.name for path in Path(self.mounttmp, subdir).iterdir())
+ self.assertEqual(
+ actual & self.EXPECTED_PATHS,
+ self.EXPECTED_PATHS,
+ "mount listing did not include minimum matches",
+ )
+ extra = frozenset(
+ name
+ for name in actual
+ if not (name.startswith('zzzzz-4zz18-') or name.endswith('roject'))
+ )
+ self.assertFalse(
+ extra,
+ "mount listing included results outside filters",
+ )
+
+
+class TagFiltersTestCase(MountTestBase):
+ COLL_UUID = _COLLECTIONS['foo_collection_in_aproject']['uuid']
+ TAG_NAME = _LINKS['foo_collection_tag']['name']
+
+ @parameterized.expand([
+ '=',
+ '!=',
+ ])
+ def test_tag_directory_filters(self, op):
+ self.make_mount(
+ fusedir.TagDirectory,
+ enable_write=False,
+ filters=[
+ ['links.head_uuid', op, self.COLL_UUID],
+ ],
+ tag=self.TAG_NAME,
+ )
+ checked_path = Path(self.mounttmp, self.COLL_UUID)
+ self.assertEqual(checked_path.exists(), op == '=')
+
+ @parameterized.expand(itertools.product(
+ ['in', 'not in'],
+ ['=', '!='],
+ ))
+ def test_tags_directory_filters(self, coll_op, link_op):
+ self.make_mount(
+ fusedir.TagsDirectory,
+ enable_write=False,
+ filters=[
+ ['links.head_uuid', coll_op, [self.COLL_UUID]],
+ ['links.name', link_op, self.TAG_NAME],
+ ],
+ )
+ if link_op == '!=':
+ filtered_path = Path(self.mounttmp, self.TAG_NAME)
+ elif coll_op == 'not in':
+ # As of 2024-02-09, foo tag only applies to the single collection.
+ # If you filter it out via head_uuid, then it disappears completely
+ # from the TagsDirectory. Hence we set that tag directory as
+ # filtered_path. If any of this changes in the future,
+ # it would be fine to append self.COLL_UUID to filtered_path here.
+ filtered_path = Path(self.mounttmp, self.TAG_NAME)
+ else:
+ filtered_path = Path(self.mounttmp, self.TAG_NAME, self.COLL_UUID, 'foo', 'nonexistent')
+ expect_path = filtered_path.parent
+ self.assertTrue(
+ expect_path.exists(),
+ f"path not found but should exist: {expect_path}",
+ )
+ self.assertFalse(
+ filtered_path.exists(),
+ f"path was found but should be filtered out: {filtered_path}",
+ )
+
+
+class FiltersIntegrationTest(IntegrationTest):
+ COLLECTIONS_BY_PROP = {
+ coll['properties']['MainFile']: coll
+ for coll in _COLLECTIONS.values()
+ if coll['owner_uuid'] == _GROUPS['fuse_filters_test_project']['uuid']
+ }
+ PROP_VALUES = list(COLLECTIONS_BY_PROP)
+
+ for test_n, query in enumerate(['foo', 'ba?']):
+ @IntegrationTest.mount([
+ '--filters', json.dumps([
+ ['collections.properties.MainFile', 'like', query],
+ ]),
+ '--mount-by-pdh', 'by_pdh',
+ '--mount-by-id', 'by_id',
+ '--mount-home', 'home',
+ ])
+ def _test_func(self, query=query):
+ pdh_path = Path(self.mnt, 'by_pdh')
+ id_path = Path(self.mnt, 'by_id')
+ home_path = Path(self.mnt, 'home')
+ query_re = re.compile(query.replace('?', '.'))
+ for prop_val, coll in self.COLLECTIONS_BY_PROP.items():
+ should_exist = query_re.fullmatch(prop_val) is not None
+ for path in [
+ pdh_path / coll['portable_data_hash'],
+ id_path / coll['portable_data_hash'],
+ id_path / coll['uuid'],
+ home_path / coll['name'],
+ ]:
+ self.assertEqual(
+ path.exists(),
+ should_exist,
+ f"{path} from MainFile={prop_val} exists!={should_exist}",
+ )
+ exec(f"test_collection_properties_filters_{test_n} = _test_func")
+
+ for test_n, mount_opts in enumerate([
+ ['--home'],
+ ['--project', _GROUPS['aproject']['uuid']],
+ ]):
+ @IntegrationTest.mount([
+ '--filters', json.dumps([
+ ['collections.name', 'like', 'zzzzz-4zz18-%'],
+ ['groups.name', 'like', 'A %roject'],
+ ]),
+ *mount_opts,
+ ])
+ def _test_func(self, mount_opts=mount_opts):
+ root_path = Path(self.mnt)
+ root_depth = len(root_path.parts)
+ max_depth = 0
+ name_re = re.compile(r'(zzzzz-4zz18-.*|A .*roject)')
+ dir_queue = [root_path]
+ while dir_queue:
+ root_path = dir_queue.pop()
+ max_depth = max(max_depth, len(root_path.parts))
+ for child in root_path.iterdir():
+ if not child.is_dir():
+ continue
+ match = name_re.fullmatch(child.name)
+ self.assertIsNotNone(
+ match,
+ "found directory with name that should've been filtered",
+ )
+ if not match.group(1).startswith('zzzzz-4zz18-'):
+ dir_queue.append(child)
+ self.assertGreaterEqual(
+ max_depth,
+ root_depth + (2 if mount_opts[0] == '--home' else 1),
+ "test descended fewer subdirectories than expected",
+ )
+ exec(f"test_multiple_name_filters_{test_n} = _test_func")
diff --git a/services/fuse/tests/test_retry.py b/services/fuse/tests/test_retry.py
index b69707af4f..92081de0a0 100644
--- a/services/fuse/tests/test_retry.py
+++ b/services/fuse/tests/test_retry.py
@@ -8,7 +8,6 @@ standard_library.install_aliases()
import arvados
import arvados_fuse.command
import json
-import mock
import os
import pycurl
import queue
@@ -16,8 +15,9 @@ from . import run_test_server
import tempfile
import unittest
-from .integration_test import IntegrationTest
+from unittest import mock
+from .integration_test import IntegrationTest
class KeepClientRetry(unittest.TestCase):
origKeepClient = arvados.keep.KeepClient
@@ -38,8 +38,8 @@ class KeepClientRetry(unittest.TestCase):
pass
self.assertEqual(num_retries, kc.call_args[1].get('num_retries'))
- def test_default_retry_3(self):
- self._test_retry(3, [])
+ def test_default_retry_10(self):
+ self._test_retry(10, [])
def test_retry_2(self):
self._test_retry(2, ['--retries=2'])
diff --git a/services/fuse/tests/test_token_expiry.py b/services/fuse/tests/test_token_expiry.py
index 040db2e096..ca2228c561 100644
--- a/services/fuse/tests/test_token_expiry.py
+++ b/services/fuse/tests/test_token_expiry.py
@@ -7,7 +7,6 @@ import apiclient
import arvados
import arvados_fuse
import logging
-import mock
import multiprocessing
import os
import re
@@ -15,6 +14,8 @@ import sys
import time
import unittest
+from unittest import mock
+
from .integration_test import IntegrationTest
logger = logging.getLogger('arvados.arv-mount')
diff --git a/services/fuse/tests/test_unmount.py b/services/fuse/tests/test_unmount.py
index e89571087e..6a19b33454 100644
--- a/services/fuse/tests/test_unmount.py
+++ b/services/fuse/tests/test_unmount.py
@@ -31,11 +31,11 @@ class UnmountTest(IntegrationTest):
self.mnt])
subprocess.check_call(
['./bin/arv-mount', '--subtype', 'test', '--replace',
- '--unmount-timeout', '10',
+ '--unmount-timeout', '60',
self.mnt])
subprocess.check_call(
['./bin/arv-mount', '--subtype', 'test', '--replace',
- '--unmount-timeout', '10',
+ '--unmount-timeout', '60',
self.mnt,
'--exec', 'true'])
for m in subprocess.check_output(['mount']).splitlines():
diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
index 1dedb409a4..e71eb07efa 100644
--- a/services/keep-balance/balance.go
+++ b/services/keep-balance/balance.go
@@ -15,14 +15,17 @@ import (
"log"
"math"
"os"
+ "regexp"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/jmoiron/sqlx"
@@ -44,6 +47,7 @@ type Balancer struct {
Dumper logrus.FieldLogger
Metrics *metrics
+ ChunkPrefix string
LostBlocksFile string
*BlockStateMap
@@ -67,18 +71,23 @@ type Balancer struct {
// subsequent balance operation.
//
// Run should only be called once on a given Balancer object.
-//
-// Typical usage:
-//
-// runOptions, err = (&Balancer{}).Run(config, runOptions)
-func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
+func (bal *Balancer) Run(ctx context.Context, client *arvados.Client, cluster *arvados.Cluster, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
nextRunOptions = runOptions
+ bal.logf("acquiring active lock")
+ if !dblock.KeepBalanceActive.Lock(ctx, func(context.Context) (*sqlx.DB, error) { return bal.DB, nil }) {
+ // context canceled
+ return
+ }
+ defer dblock.KeepBalanceActive.Unlock()
+
defer bal.time("sweep", "wall clock time to run one full sweep")()
- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(cluster.Collections.BalanceTimeout.Duration()))
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(cluster.Collections.BalanceTimeout.Duration()))
defer cancel()
+ go bal.reportMemorySize(ctx)
+
var lbFile *os.File
if bal.LostBlocksFile != "" {
tmpfn := bal.LostBlocksFile + ".tmp"
@@ -128,7 +137,7 @@ func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOp
client.Timeout = 0
rs := bal.rendezvousState()
- if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
+ if cluster.Collections.BalanceTrashLimit > 0 && rs != runOptions.SafeRendezvousState {
if runOptions.SafeRendezvousState != "" {
bal.logf("notice: KeepServices list has changed since last run")
}
@@ -146,6 +155,7 @@ func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOp
if err = bal.GetCurrentState(ctx, client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
return
}
+ bal.setupLookupTables(cluster)
bal.ComputeChangeSets()
bal.PrintStatistics()
if err = bal.CheckSanityLate(); err != nil {
@@ -162,14 +172,14 @@ func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOp
}
lbFile = nil
}
- if runOptions.CommitPulls {
+ if cluster.Collections.BalancePullLimit > 0 {
err = bal.CommitPulls(ctx, client)
if err != nil {
// Skip trash if we can't pull. (Too cautious?)
return
}
}
- if runOptions.CommitTrash {
+ if cluster.Collections.BalanceTrashLimit > 0 {
err = bal.CommitTrash(ctx, client)
if err != nil {
return
@@ -218,7 +228,7 @@ func (bal *Balancer) cleanupMounts() {
rwdev := map[string]*KeepService{}
for _, srv := range bal.KeepServices {
for _, mnt := range srv.mounts {
- if !mnt.ReadOnly {
+ if mnt.AllowWrite {
rwdev[mnt.UUID] = srv
}
}
@@ -228,7 +238,7 @@ func (bal *Balancer) cleanupMounts() {
for _, srv := range bal.KeepServices {
var dedup []*KeepMount
for _, mnt := range srv.mounts {
- if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+ if !mnt.AllowWrite && rwdev[mnt.UUID] != nil {
bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
} else {
dedup = append(dedup, mnt)
@@ -266,6 +276,14 @@ func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
}
}
+ for _, c := range bal.ChunkPrefix {
+ if !strings.ContainsRune("0123456789abcdef", c) {
+ return fmt.Errorf("invalid char %q in chunk prefix %q: only lowercase hex digits make sense", string(c), bal.ChunkPrefix)
+ }
+ }
+ if len(bal.ChunkPrefix) > 32 {
+ return fmt.Errorf("invalid chunk prefix %q: longer than a block hash", bal.ChunkPrefix)
+ }
mountProblem := false
type deviceMount struct {
@@ -398,7 +416,7 @@ func (bal *Balancer) GetCurrentState(ctx context.Context, c *arvados.Client, pag
go func(mounts []*KeepMount) {
defer wg.Done()
bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
- idx, err := mounts[0].KeepService.IndexMount(ctx, c, mounts[0].UUID, "")
+ idx, err := mounts[0].KeepService.IndexMount(ctx, c, mounts[0].UUID, bal.ChunkPrefix)
if err != nil {
select {
case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
@@ -490,6 +508,20 @@ func (bal *Balancer) addCollection(coll arvados.Collection) error {
if coll.ReplicationDesired != nil {
repl = *coll.ReplicationDesired
}
+ if bal.ChunkPrefix != "" {
+ // Throw out blocks that don't match the requested
+ // prefix. (We save a bit of GC work here by
+ // preallocating based on each hex digit in
+ // ChunkPrefix reducing the expected size of the
+ // filtered set by ~16x.)
+ filtered := make([]arvados.SizedDigest, 0, len(blkids)>>(4*len(bal.ChunkPrefix)-1))
+ for _, blkid := range blkids {
+ if strings.HasPrefix(string(blkid), bal.ChunkPrefix) {
+ filtered = append(filtered, blkid)
+ }
+ }
+ blkids = filtered
+ }
bal.Logger.Debugf("%v: %d blocks x%d", coll.UUID, len(blkids), repl)
// Pass pdh to IncreaseDesired only if LostBlocksFile is being
// written -- otherwise it's just a waste of memory.
@@ -511,7 +543,6 @@ func (bal *Balancer) ComputeChangeSets() {
// This just calls balanceBlock() once for each block, using a
// pool of worker goroutines.
defer bal.time("changeset_compute", "wall clock time to compute changesets")()
- bal.setupLookupTables()
type balanceTask struct {
blkid arvados.SizedDigest
@@ -546,7 +577,7 @@ func (bal *Balancer) ComputeChangeSets() {
bal.collectStatistics(results)
}
-func (bal *Balancer) setupLookupTables() {
+func (bal *Balancer) setupLookupTables(cluster *arvados.Cluster) {
bal.serviceRoots = make(map[string]string)
bal.classes = defaultClasses
bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
@@ -556,9 +587,11 @@ func (bal *Balancer) setupLookupTables() {
for _, mnt := range srv.mounts {
bal.mounts++
- // All mounts on a read-only service are
- // effectively read-only.
- mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+ if srv.ReadOnly {
+ // All mounts on a read-only service
+ // are effectively read-only.
+ mnt.AllowWrite = false
+ }
for class := range mnt.StorageClasses {
if mbc := bal.mountsByClass[class]; mbc == nil {
@@ -576,6 +609,13 @@ func (bal *Balancer) setupLookupTables() {
// class" case in balanceBlock depends on the order classes
// are considered.
sort.Strings(bal.classes)
+
+ for _, srv := range bal.KeepServices {
+ srv.ChangeSet = &ChangeSet{
+ PullLimit: cluster.Collections.BalancePullLimit,
+ TrashLimit: cluster.Collections.BalanceTrashLimit,
+ }
+ }
}
const (
@@ -636,7 +676,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
slots = append(slots, slot{
mnt: mnt,
repl: repl,
- want: repl != nil && mnt.ReadOnly,
+ want: repl != nil && !mnt.AllowTrash,
})
}
}
@@ -725,7 +765,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
protMnt[slot.mnt] = true
replProt += slot.mnt.Replication
}
- if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+ if replWant < desired && (slot.repl != nil || slot.mnt.AllowWrite) {
slots[i].want = true
wantSrv[slot.mnt.KeepService] = true
wantMnt[slot.mnt] = true
@@ -798,23 +838,53 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
}
blockState := computeBlockState(slots, nil, len(blk.Replicas), 0)
- var lost bool
- var changes []string
+ // Sort the slots by rendezvous order. This ensures "trash the
+ // first of N replicas with identical timestamps" is
+ // predictable (helpful for testing) and well distributed
+ // across servers.
+ sort.Slice(slots, func(i, j int) bool {
+ si, sj := slots[i], slots[j]
+ if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
+ return orderi < orderj
+ } else {
+ return rendezvousLess(si.mnt.UUID, sj.mnt.UUID, blkid)
+ }
+ })
+
+ var (
+ lost bool
+ changes []string
+ trashedMtime = make(map[int64]bool, len(slots))
+ )
for _, slot := range slots {
// TODO: request a Touch if Mtime is duplicated.
var change int
switch {
case !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime:
- slot.mnt.KeepService.AddTrash(Trash{
- SizedDigest: blkid,
- Mtime: slot.repl.Mtime,
- From: slot.mnt,
- })
- change = changeTrash
+ if trashedMtime[slot.repl.Mtime] {
+ // Don't trash multiple replicas with
+ // identical timestamps. If they are
+ // multiple views of the same backing
+ // storage, asking both servers to
+ // trash is redundant and can cause
+ // races (see #20242). If they are
+ // distinct replicas that happen to
+ // have identical timestamps, we'll
+ // get this one on the next sweep.
+ change = changeNone
+ } else {
+ slot.mnt.KeepService.AddTrash(Trash{
+ SizedDigest: blkid,
+ Mtime: slot.repl.Mtime,
+ From: slot.mnt,
+ })
+ change = changeTrash
+ trashedMtime[slot.repl.Mtime] = true
+ }
case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
lost = true
change = changeNone
- case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+ case slot.repl == nil && slot.want && slot.mnt.AllowWrite:
slot.mnt.KeepService.AddPull(Pull{
SizedDigest: blkid,
From: blk.Replicas[0].KeepMount.KeepService,
@@ -894,19 +964,21 @@ type replicationStats struct {
}
type balancerStats struct {
- lost blocksNBytes
- overrep blocksNBytes
- unref blocksNBytes
- garbage blocksNBytes
- underrep blocksNBytes
- unachievable blocksNBytes
- justright blocksNBytes
- desired blocksNBytes
- current blocksNBytes
- pulls int
- trashes int
- replHistogram []int
- classStats map[string]replicationStats
+ lost blocksNBytes
+ overrep blocksNBytes
+ unref blocksNBytes
+ garbage blocksNBytes
+ underrep blocksNBytes
+ unachievable blocksNBytes
+ justright blocksNBytes
+ desired blocksNBytes
+ current blocksNBytes
+ pulls int
+ pullsDeferred int
+ trashes int
+ trashesDeferred int
+ replHistogram []int
+ classStats map[string]replicationStats
// collectionBytes / collectionBlockBytes = deduplication ratio
collectionBytes int64 // sum(bytes in referenced blocks) across all collections
@@ -1029,7 +1101,9 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
}
for _, srv := range bal.KeepServices {
s.pulls += len(srv.ChangeSet.Pulls)
+ s.pullsDeferred += srv.ChangeSet.PullsDeferred
s.trashes += len(srv.ChangeSet.Trashes)
+ s.trashesDeferred += srv.ChangeSet.TrashesDeferred
}
bal.stats = s
bal.Metrics.UpdateStats(s)
@@ -1185,6 +1259,60 @@ func (bal *Balancer) time(name, help string) func() {
}
}
+// Log current memory usage: once now, at least once every 10 minutes,
+// and when memory grows by 40% since the last log. Stop when ctx is
+// canceled.
+func (bal *Balancer) reportMemorySize(ctx context.Context) {
+ buf, _ := os.ReadFile("/proc/self/smaps")
+ m := regexp.MustCompile(`\nKernelPageSize:\s*(\d+) kB\n`).FindSubmatch(buf)
+ var pagesize int64
+ if len(m) == 2 {
+ pagesize, _ = strconv.ParseInt(string(m[1]), 10, 64)
+ pagesize <<= 10
+ }
+ if pagesize == 0 {
+ bal.logf("cannot log OS-reported memory size: failed to parse KernelPageSize from /proc/self/smaps")
+ }
+ osstats := func() string {
+ if pagesize == 0 {
+ return ""
+ }
+ buf, _ := os.ReadFile("/proc/self/statm")
+ fields := strings.Split(string(buf), " ")
+ if len(fields) < 2 {
+ return ""
+ }
+ virt, _ := strconv.ParseInt(fields[0], 10, 64)
+ virt *= pagesize
+ res, _ := strconv.ParseInt(fields[1], 10, 64)
+ res *= pagesize
+ if virt == 0 || res == 0 {
+ return ""
+ }
+ return fmt.Sprintf(" virt %d res %d", virt, res)
+ }
+
+ var nextTime time.Time
+ var nextMem uint64
+ const maxInterval = time.Minute * 10
+ const maxIncrease = 1.4
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ var memstats runtime.MemStats
+ for ctx.Err() == nil {
+ now := time.Now()
+ runtime.ReadMemStats(&memstats)
+ mem := memstats.StackInuse + memstats.HeapInuse
+ if now.After(nextTime) || mem >= nextMem {
+ bal.logf("heap %d stack %d heapalloc %d%s", memstats.HeapInuse, memstats.StackInuse, memstats.HeapAlloc, osstats())
+ nextMem = uint64(float64(mem) * maxIncrease)
+ nextTime = now.Add(maxInterval)
+ }
+ <-ticker.C
+ }
+}
+
// Rendezvous hash sort function. Less efficient than sorting on
// precomputed rendezvous hashes, but also rarely used.
func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
index 2db7bea173..81e4c7b867 100644
--- a/services/keep-balance/balance_run_test.go
+++ b/services/keep-balance/balance_run_test.go
@@ -5,7 +5,7 @@
package keepbalance
import (
- "bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -15,6 +15,7 @@ import (
"os"
"strings"
"sync"
+ "syscall"
"time"
"git.arvados.org/arvados.git/lib/config"
@@ -23,7 +24,6 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/expfmt"
check "gopkg.in/check.v1"
)
@@ -90,21 +90,29 @@ var stubMounts = map[string][]arvados.KeepMount{
UUID: "zzzzz-ivpuk-000000000000000",
DeviceID: "keep0-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep1.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-100000000000000",
DeviceID: "keep1-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep2.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-200000000000000",
DeviceID: "keep2-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep3.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-300000000000000",
DeviceID: "keep3-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
}
@@ -255,26 +263,32 @@ func (s *stubServer) serveKeepstoreMounts() *reqTracker {
}
func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
+ fooLine := func(mt int) string { return fmt.Sprintf("acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+mt) }
+ barLine := "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n"
rt := &reqTracker{}
s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
count := rt.Add(r)
- if r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
- io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+ if r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(barLine, r.URL.Path[7:]) {
+ io.WriteString(w, barLine)
}
- fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n\n", 12345678+count)
+ if strings.HasPrefix(fooLine(count), r.URL.Path[7:]) {
+ io.WriteString(w, fooLine(count))
+ }
+ io.WriteString(w, "\n")
})
for _, mounts := range stubMounts {
for i, mnt := range mounts {
i := i
s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
count := rt.Add(r)
- if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
- io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+ r.ParseForm()
+ if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(barLine, r.Form.Get("prefix")) {
+ io.WriteString(w, barLine)
}
- if i == 0 {
- fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+count)
+ if i == 0 && strings.HasPrefix(fooLine(count), r.Form.Get("prefix")) {
+ io.WriteString(w, fooLine(count))
}
- fmt.Fprintf(w, "\n")
+ io.WriteString(w, "\n")
})
}
}
@@ -282,21 +296,44 @@ func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
}
func (s *stubServer) serveKeepstoreIndexFoo1() *reqTracker {
+ fooLine := "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n"
rt := &reqTracker{}
s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
rt.Add(r)
- io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
+ if r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(fooLine, r.URL.Path[7:]) {
+ io.WriteString(w, fooLine)
+ }
+ io.WriteString(w, "\n")
})
for _, mounts := range stubMounts {
for i, mnt := range mounts {
i := i
s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
rt.Add(r)
- if i == 0 {
- io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
- } else {
- io.WriteString(w, "\n")
+ if i == 0 && strings.HasPrefix(fooLine, r.Form.Get("prefix")) {
+ io.WriteString(w, fooLine)
}
+ io.WriteString(w, "\n")
+ })
+ }
+ }
+ return rt
+}
+
+func (s *stubServer) serveKeepstoreIndexIgnoringPrefix() *reqTracker {
+ fooLine := "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n"
+ rt := &reqTracker{}
+ s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
+ rt.Add(r)
+ io.WriteString(w, fooLine)
+ io.WriteString(w, "\n")
+ })
+ for _, mounts := range stubMounts {
+ for _, mnt := range mounts {
+ s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+ rt.Add(r)
+ io.WriteString(w, fooLine)
+ io.WriteString(w, "\n")
})
}
}
@@ -360,9 +397,7 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
_, err := s.db.Exec(`delete from collections`)
c.Assert(err, check.IsNil)
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
- Logger: ctxlog.TestLogger(c),
+ Logger: ctxlog.TestLogger(c),
}
s.stub.serveCurrentUserAdmin()
s.stub.serveZeroCollections()
@@ -372,18 +407,37 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
- _, err = srv.runOnce()
+ _, err = srv.runOnce(context.Background())
c.Check(err, check.ErrorMatches, "received zero collections")
c.Check(trashReqs.Count(), check.Equals, 4)
c.Check(pullReqs.Count(), check.Equals, 0)
}
-func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
+func (s *runSuite) TestRefuseBadIndex(c *check.C) {
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
+ ChunkPrefix: "abc",
Logger: ctxlog.TestLogger(c),
}
+ s.stub.serveCurrentUserAdmin()
+ s.stub.serveFooBarFileCollections()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
+ s.stub.serveKeepstoreIndexIgnoringPrefix()
+ trashReqs := s.stub.serveKeepstoreTrash()
+ pullReqs := s.stub.serveKeepstorePull()
+ srv := s.newServer(&opts)
+ bal, err := srv.runOnce(context.Background())
+ c.Check(err, check.ErrorMatches, ".*Index response included block .* despite asking for prefix \"abc\"")
+ c.Check(trashReqs.Count(), check.Equals, 4)
+ c.Check(pullReqs.Count(), check.Equals, 0)
+ c.Check(bal.stats.trashes, check.Equals, 0)
+ c.Check(bal.stats.pulls, check.Equals, 0)
+}
+
+func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
+ opts := RunOptions{
+ Logger: ctxlog.TestLogger(c),
+ }
s.stub.serveCurrentUserNotAdmin()
s.stub.serveZeroCollections()
s.stub.serveKeepServices(stubServices)
@@ -391,17 +445,44 @@ func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
- _, err := srv.runOnce()
+ _, err := srv.runOnce(context.Background())
c.Check(err, check.ErrorMatches, "current user .* is not .* admin user")
c.Check(trashReqs.Count(), check.Equals, 0)
c.Check(pullReqs.Count(), check.Equals, 0)
}
+func (s *runSuite) TestInvalidChunkPrefix(c *check.C) {
+ for _, trial := range []struct {
+ prefix string
+ errRe string
+ }{
+ {"123ABC", "invalid char \"A\" in chunk prefix.*"},
+ {"123xyz", "invalid char \"x\" in chunk prefix.*"},
+ {"123456789012345678901234567890123", "invalid chunk prefix .* longer than a block hash"},
+ } {
+ s.SetUpTest(c)
+ c.Logf("trying invalid prefix %q", trial.prefix)
+ opts := RunOptions{
+ ChunkPrefix: trial.prefix,
+ Logger: ctxlog.TestLogger(c),
+ }
+ s.stub.serveCurrentUserAdmin()
+ s.stub.serveFooBarFileCollections()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
+ trashReqs := s.stub.serveKeepstoreTrash()
+ pullReqs := s.stub.serveKeepstorePull()
+ srv := s.newServer(&opts)
+ _, err := srv.runOnce(context.Background())
+ c.Check(err, check.ErrorMatches, trial.errRe)
+ c.Check(trashReqs.Count(), check.Equals, 0)
+ c.Check(pullReqs.Count(), check.Equals, 0)
+ }
+}
+
func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
- Logger: ctxlog.TestLogger(c),
+ Logger: ctxlog.TestLogger(c),
}
s.stub.serveCurrentUserAdmin()
s.stub.serveZeroCollections()
@@ -417,7 +498,7 @@ func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
- _, err := srv.runOnce()
+ _, err := srv.runOnce(context.Background())
c.Check(err, check.ErrorMatches, "cannot continue with config errors.*")
c.Check(trashReqs.Count(), check.Equals, 0)
c.Check(pullReqs.Count(), check.Equals, 0)
@@ -429,9 +510,7 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
s.config.Collections.BlobMissingReport = lostf.Name()
defer os.Remove(lostf.Name())
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
- Logger: ctxlog.TestLogger(c),
+ Logger: ctxlog.TestLogger(c),
}
s.stub.serveCurrentUserAdmin()
s.stub.serveFooBarFileCollections()
@@ -442,7 +521,7 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
c.Assert(err, check.IsNil)
- _, err = srv.runOnce()
+ _, err = srv.runOnce(context.Background())
c.Check(err, check.IsNil)
lost, err := ioutil.ReadFile(lostf.Name())
c.Assert(err, check.IsNil)
@@ -450,10 +529,10 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
}
func (s *runSuite) TestDryRun(c *check.C) {
+ s.config.Collections.BalanceTrashLimit = 0
+ s.config.Collections.BalancePullLimit = 0
opts := RunOptions{
- CommitPulls: false,
- CommitTrash: false,
- Logger: ctxlog.TestLogger(c),
+ Logger: ctxlog.TestLogger(c),
}
s.stub.serveCurrentUserAdmin()
collReqs := s.stub.serveFooBarFileCollections()
@@ -463,7 +542,7 @@ func (s *runSuite) TestDryRun(c *check.C) {
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
- bal, err := srv.runOnce()
+ bal, err := srv.runOnce(context.Background())
c.Check(err, check.IsNil)
for _, req := range collReqs.reqs {
c.Check(req.Form.Get("include_trash"), check.Equals, "true")
@@ -471,19 +550,24 @@ func (s *runSuite) TestDryRun(c *check.C) {
}
c.Check(trashReqs.Count(), check.Equals, 0)
c.Check(pullReqs.Count(), check.Equals, 0)
- c.Check(bal.stats.pulls, check.Not(check.Equals), 0)
+ c.Check(bal.stats.pulls, check.Equals, 0)
+ c.Check(bal.stats.pullsDeferred, check.Not(check.Equals), 0)
+ c.Check(bal.stats.trashes, check.Equals, 0)
+ c.Check(bal.stats.trashesDeferred, check.Not(check.Equals), 0)
c.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)
c.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)
+
+ metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_trash_entries_deferred_count [1-9].*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_pull_entries_deferred_count [1-9].*`)
}
func (s *runSuite) TestCommit(c *check.C) {
s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
s.config.ManagementToken = "xyzzy"
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
- Logger: ctxlog.TestLogger(c),
- Dumper: ctxlog.TestLogger(c),
+ Logger: ctxlog.TestLogger(c),
+ Dumper: ctxlog.TestLogger(c),
}
s.stub.serveCurrentUserAdmin()
s.stub.serveFooBarFileCollections()
@@ -493,7 +577,7 @@ func (s *runSuite) TestCommit(c *check.C) {
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
srv := s.newServer(&opts)
- bal, err := srv.runOnce()
+ bal, err := srv.runOnce(context.Background())
c.Check(err, check.IsNil)
c.Check(trashReqs.Count(), check.Equals, 8)
c.Check(pullReqs.Count(), check.Equals, 4)
@@ -507,21 +591,48 @@ func (s *runSuite) TestCommit(c *check.C) {
c.Assert(err, check.IsNil)
c.Check(string(lost), check.Not(check.Matches), `(?ms).*acbd18db4cc2f85cedef654fccc4a4d8.*`)
- buf, err := s.getMetrics(c, srv)
- c.Check(err, check.IsNil)
- bufstr := buf.String()
- c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
- c.Check(bufstr, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
- c.Check(bufstr, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 1\n.*`)
- c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_dedup_byte_ratio [1-9].*`)
- c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_dedup_block_ratio [1-9].*`)
+ metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 1\n.*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_byte_ratio [1-9].*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_block_ratio [1-9].*`)
+
+ for _, cat := range []string{
+ "dedup_byte_ratio", "dedup_block_ratio", "collection_bytes",
+ "referenced_bytes", "referenced_blocks", "reference_count",
+ "pull_entries_sent_count",
+ "trash_entries_sent_count",
+ } {
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+` [1-9].*`)
+ }
+
+ for _, cat := range []string{
+ "pull_entries_deferred_count",
+ "trash_entries_deferred_count",
+ } {
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+` 0\n.*`)
+ }
+
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="0"} [1-9].*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="1"} [1-9].*`)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="9"} 0\n.*`)
+
+ for _, sub := range []string{"replicas", "blocks", "bytes"} {
+ for _, cat := range []string{"needed", "unneeded", "unachievable", "pulling"} {
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_usage_`+sub+`{status="`+cat+`",storage_class="default"} [1-9].*`)
+ }
+ for _, cat := range []string{"total", "garbage", "transient", "overreplicated", "underreplicated", "unachievable", "balanced", "desired", "lost"} {
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+`_`+sub+` [0-9].*`)
+ }
+ }
+ c.Logf("%s", metrics)
}
-func (s *runSuite) TestRunForever(c *check.C) {
- s.config.ManagementToken = "xyzzy"
+func (s *runSuite) TestChunkPrefix(c *check.C) {
+ s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
+ ChunkPrefix: "ac", // catch "foo" but not "bar"
Logger: ctxlog.TestLogger(c),
Dumper: ctxlog.TestLogger(c),
}
@@ -532,46 +643,124 @@ func (s *runSuite) TestRunForever(c *check.C) {
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
+ srv := s.newServer(&opts)
+ bal, err := srv.runOnce(context.Background())
+ c.Check(err, check.IsNil)
+ c.Check(trashReqs.Count(), check.Equals, 8)
+ c.Check(pullReqs.Count(), check.Equals, 4)
+ // "foo" block is overreplicated by 2
+ c.Check(bal.stats.trashes, check.Equals, 2)
+ // "bar" block is underreplicated but does not match prefix
+ c.Check(bal.stats.pulls, check.Equals, 0)
+
+ lost, err := ioutil.ReadFile(s.config.Collections.BlobMissingReport)
+ c.Assert(err, check.IsNil)
+ c.Check(string(lost), check.Equals, "")
+}
- stop := make(chan interface{})
- s.config.Collections.BalancePeriod = arvados.Duration(time.Millisecond)
+func (s *runSuite) TestRunForever_TriggeredByTimer(c *check.C) {
+ s.config.ManagementToken = "xyzzy"
+ opts := RunOptions{
+ Logger: ctxlog.TestLogger(c),
+ Dumper: ctxlog.TestLogger(c),
+ }
+ s.stub.serveCurrentUserAdmin()
+ s.stub.serveFooBarFileCollections()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
+ s.stub.serveKeepstoreIndexFoo4Bar1()
+ trashReqs := s.stub.serveKeepstoreTrash()
+ pullReqs := s.stub.serveKeepstorePull()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ s.config.Collections.BalancePeriod = arvados.Duration(10 * time.Millisecond)
srv := s.newServer(&opts)
done := make(chan bool)
go func() {
- srv.runForever(stop)
+ srv.runForever(ctx)
close(done)
}()
// Each run should send 4 pull lists + 4 trash lists. The
// first run should also send 4 empty trash lists at
- // startup. We should complete all four runs in much less than
- // a second.
- for t0 := time.Now(); pullReqs.Count() < 16 && time.Since(t0) < 10*time.Second; {
+ // startup. We should complete at least four runs in much less
+ // than 10s.
+ for t0 := time.Now(); time.Since(t0) < 10*time.Second; {
+ pulls := pullReqs.Count()
+ if pulls >= 16 && trashReqs.Count() == pulls+4 {
+ break
+ }
time.Sleep(time.Millisecond)
}
- stop <- true
+ cancel()
<-done
c.Check(pullReqs.Count() >= 16, check.Equals, true)
- c.Check(trashReqs.Count(), check.Equals, pullReqs.Count()+4)
+ c.Check(trashReqs.Count() >= 20, check.Equals, true)
- buf, err := s.getMetrics(c, srv)
- c.Check(err, check.IsNil)
- c.Check(buf, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count `+fmt.Sprintf("%d", pullReqs.Count()/4)+`\n.*`)
+ // We should have completed 4 runs before calling cancel().
+ // But the next run might also have started before we called
+ // cancel(), in which case the extra run will be included in
+ // the changeset_compute_seconds_count metric.
+ completed := pullReqs.Count() / 4
+ metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+ c.Check(metrics, check.Matches, fmt.Sprintf(`(?ms).*\narvados_keepbalance_changeset_compute_seconds_count (%d|%d)\n.*`, completed, completed+1))
}
-func (s *runSuite) getMetrics(c *check.C, srv *Server) (*bytes.Buffer, error) {
- mfs, err := srv.Metrics.reg.Gather()
- if err != nil {
- return nil, err
+func (s *runSuite) TestRunForever_TriggeredBySignal(c *check.C) {
+ s.config.ManagementToken = "xyzzy"
+ opts := RunOptions{
+ Logger: ctxlog.TestLogger(c),
+ Dumper: ctxlog.TestLogger(c),
}
+ s.stub.serveCurrentUserAdmin()
+ s.stub.serveFooBarFileCollections()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
+ s.stub.serveKeepstoreIndexFoo4Bar1()
+ trashReqs := s.stub.serveKeepstoreTrash()
+ pullReqs := s.stub.serveKeepstorePull()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ s.config.Collections.BalancePeriod = arvados.Duration(time.Minute)
+ srv := s.newServer(&opts)
- var buf bytes.Buffer
- for _, mf := range mfs {
- if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
- return nil, err
+ done := make(chan bool)
+ go func() {
+ srv.runForever(ctx)
+ close(done)
+ }()
+
+ procself, err := os.FindProcess(os.Getpid())
+ c.Assert(err, check.IsNil)
+
+ // Each run should send 4 pull lists + 4 trash lists. The
+ // first run should also send 4 empty trash lists at
+ // startup. We should be able to complete four runs in much
+ // less than 10s.
+ completedRuns := 0
+ for t0 := time.Now(); time.Since(t0) < 10*time.Second; {
+ pulls := pullReqs.Count()
+ if pulls >= 16 && trashReqs.Count() == pulls+4 {
+ break
}
+ // Once the 1st run has started automatically, we
+ // start sending a single SIGUSR1 at the end of each
+ // run, to ensure we get exactly 4 runs in total.
+ if pulls > 0 && pulls%4 == 0 && pulls <= 12 && pulls/4 > completedRuns {
+ completedRuns = pulls / 4
+ c.Logf("completed run %d, sending SIGUSR1 to trigger next run", completedRuns)
+ procself.Signal(syscall.SIGUSR1)
+ }
+ time.Sleep(time.Millisecond)
}
+ cancel()
+ <-done
+ c.Check(pullReqs.Count(), check.Equals, 16)
+ c.Check(trashReqs.Count(), check.Equals, 20)
- return &buf, nil
+ metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+ c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 4\n.*`)
}
diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
index 6626609b57..85d4ff8b5d 100644
--- a/services/keep-balance/balance_test.go
+++ b/services/keep-balance/balance_test.go
@@ -12,6 +12,7 @@ import (
"testing"
"time"
+ "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
@@ -26,6 +27,7 @@ var _ = check.Suite(&balancerSuite{})
type balancerSuite struct {
Balancer
+ config *arvados.Cluster
srvs []*KeepService
blks map[string]tester
knownRendezvous [][]int
@@ -72,6 +74,11 @@ func (bal *balancerSuite) SetUpSuite(c *check.C) {
bal.signatureTTL = 3600
bal.Logger = ctxlog.TestLogger(c)
+
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.Equals, nil)
+ bal.config, err = cfg.GetCluster("")
+ c.Assert(err, check.Equals, nil)
}
func (bal *balancerSuite) SetUpTest(c *check.C) {
@@ -87,6 +94,8 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
KeepMount: arvados.KeepMount{
UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
},
KeepService: srv,
}}
@@ -153,15 +162,53 @@ func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
}})
}
+func (bal *balancerSuite) TestAllowTrashWhenReadOnly(c *check.C) {
+ srvs := bal.srvList(0, slots{3})
+ srvs[0].mounts[0].KeepMount.AllowWrite = false
+ srvs[0].mounts[0].KeepMount.AllowTrash = true
+ // can't pull to slot 3, so pull to slot 4 instead
+ bal.try(c, tester{
+ desired: map[string]int{"default": 4},
+ current: slots{0, 1},
+ shouldPull: slots{2, 4},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ pulling: 2,
+ }})
+ // expect to be able to trash slot 3 in future, so pull to
+ // slot 1
+ bal.try(c, tester{
+ desired: map[string]int{"default": 2},
+ current: slots{0, 3},
+ shouldPull: slots{1},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ pulling: 1,
+ }})
+ // trash excess from slot 3
+ bal.try(c, tester{
+ desired: map[string]int{"default": 2},
+ current: slots{0, 1, 3},
+ shouldTrash: slots{3},
+ expectBlockState: &balancedBlockState{
+ needed: 2,
+ unneeded: 1,
+ }})
+}
+
func (bal *balancerSuite) TestMultipleViewsReadOnly(c *check.C) {
- bal.testMultipleViews(c, true)
+ bal.testMultipleViews(c, false, false)
+}
+
+func (bal *balancerSuite) TestMultipleViewsReadOnlyAllowTrash(c *check.C) {
+ bal.testMultipleViews(c, false, true)
}
func (bal *balancerSuite) TestMultipleViews(c *check.C) {
- bal.testMultipleViews(c, false)
+ bal.testMultipleViews(c, true, true)
}
-func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
+func (bal *balancerSuite) testMultipleViews(c *check.C, allowWrite, allowTrash bool) {
for i, srv := range bal.srvs {
// Add a mount to each service
srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
@@ -169,7 +216,8 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
KeepMount: arvados.KeepMount{
DeviceID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
UUID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
- ReadOnly: readonly,
+ AllowWrite: allowWrite,
+ AllowTrash: allowTrash,
Replication: 1,
StorageClasses: map[string]bool{"default": true},
},
@@ -188,11 +236,12 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
desired: map[string]int{"default": 1},
current: slots{0, i, i},
shouldTrash: slots{i}})
- } else if readonly {
+ } else if !allowTrash {
// Timestamps are all different, and the third
// replica can't be trashed because it's on a
- // read-only mount, so the first two replicas
- // should be trashed.
+ // read-only mount (with
+ // AllowTrashWhenReadOnly=false), so the first
+ // two replicas should be trashed.
bal.try(c, tester{
desired: map[string]int{"default": 1},
current: slots{0, i, i},
@@ -321,6 +370,35 @@ func (bal *balancerSuite) TestDecreaseReplTimestampCollision(c *check.C) {
desired: map[string]int{"default": 2},
current: slots{0, 1, 2},
timestamps: []int64{12345678, 10000000, 10000000}})
+ bal.try(c, tester{
+ desired: map[string]int{"default": 0},
+ current: slots{0, 1, 2},
+ timestamps: []int64{12345678, 12345678, 12345678},
+ shouldTrash: slots{0},
+ shouldTrashMounts: []string{
+ bal.srvs[bal.knownRendezvous[0][0]].mounts[0].UUID}})
+ bal.try(c, tester{
+ desired: map[string]int{"default": 2},
+ current: slots{0, 1, 2, 5, 6},
+ timestamps: []int64{12345678, 12345679, 10000000, 10000000, 10000000},
+ shouldTrash: slots{2},
+ shouldTrashMounts: []string{
+ bal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID}})
+ bal.try(c, tester{
+ desired: map[string]int{"default": 2},
+ current: slots{0, 1, 2, 5, 6},
+ timestamps: []int64{12345678, 12345679, 12345671, 10000000, 10000000},
+ shouldTrash: slots{2, 5},
+ shouldTrashMounts: []string{
+ bal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID,
+ bal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})
+ bal.try(c, tester{
+ desired: map[string]int{"default": 2},
+ current: slots{0, 1, 2, 5, 6},
+ timestamps: []int64{12345678, 12345679, 12345679, 10000000, 10000000},
+ shouldTrash: slots{5},
+ shouldTrashMounts: []string{
+ bal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})
}
func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
@@ -345,7 +423,7 @@ func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
}
func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
- bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+ bal.srvs[3].mounts[0].KeepMount.AllowWrite = false
bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
@@ -554,6 +632,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
// classes=[special,special2].
bal.srvs[9].mounts = []*KeepMount{{
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special": true},
UUID: "zzzzz-mount-special00000009",
@@ -562,6 +642,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
KeepService: bal.srvs[9],
}, {
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special": true, "special2": true},
UUID: "zzzzz-mount-special20000009",
@@ -574,6 +656,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
// classes=[special3], one with classes=[default].
bal.srvs[13].mounts = []*KeepMount{{
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special2": true},
UUID: "zzzzz-mount-special2000000d",
@@ -582,6 +666,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
KeepService: bal.srvs[13],
}, {
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"default": true},
UUID: "zzzzz-mount-00000000000000d",
@@ -664,7 +750,7 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
// the appropriate changes for that block have been added to the
// changesets.
func (bal *balancerSuite) try(c *check.C, t tester) {
- bal.setupLookupTables()
+ bal.setupLookupTables(bal.config)
blk := &BlockState{
Replicas: bal.replList(t.known, t.current),
Desired: t.desired,
@@ -672,9 +758,6 @@ func (bal *balancerSuite) try(c *check.C, t tester) {
for i, t := range t.timestamps {
blk.Replicas[i].Mtime = t
}
- for _, srv := range bal.srvs {
- srv.ChangeSet = &ChangeSet{}
- }
result := bal.balanceBlock(knownBlkid(t.known), blk)
var didPull, didTrash slots
diff --git a/services/keep-balance/block_state.go b/services/keep-balance/block_state.go
index 07c9952f90..63a994096b 100644
--- a/services/keep-balance/block_state.go
+++ b/services/keep-balance/block_state.go
@@ -151,7 +151,11 @@ func (bsm *BlockStateMap) GetConfirmedReplication(blkids []arvados.SizedDigest,
for _, c := range classes {
perclass[c] = 0
}
- for _, r := range bsm.get(blkid).Replicas {
+ bs, ok := bsm.entries[blkid]
+ if !ok {
+ return 0
+ }
+ for _, r := range bs.Replicas {
total += r.KeepMount.Replication
mntclasses := r.KeepMount.StorageClasses
if len(mntclasses) == 0 {
diff --git a/services/keep-balance/block_state_test.go b/services/keep-balance/block_state_test.go
index 8a58be288f..c6076bbd3d 100644
--- a/services/keep-balance/block_state_test.go
+++ b/services/keep-balance/block_state_test.go
@@ -5,6 +5,7 @@
package keepbalance
import (
+ "sync"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -92,3 +93,25 @@ func (s *confirmedReplicationSuite) TestBlocksOnMultipleMounts(c *check.C) {
n = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, nil)
c.Check(n, check.Equals, 4)
}
+
+func (s *confirmedReplicationSuite) TestConcurrency(c *check.C) {
+ var wg sync.WaitGroup
+ for i := 1000; i < 1256; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ n := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(i), knownBlkid(i)}, []string{"default"})
+ c.Check(n, check.Equals, 0)
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ n := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10)}, []string{"default"})
+ c.Check(n, check.Equals, 1)
+ n = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20)}, []string{"default"})
+ c.Check(n, check.Equals, 2)
+ }()
+ }
+ wg.Wait()
+}
diff --git a/services/keep-balance/change_set.go b/services/keep-balance/change_set.go
index 8e0ba028ac..771e277d60 100644
--- a/services/keep-balance/change_set.go
+++ b/services/keep-balance/change_set.go
@@ -10,6 +10,7 @@ import (
"sync"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/services/keepstore"
)
// Pull is a request to retrieve a block from a remote server, and
@@ -23,13 +24,8 @@ type Pull struct {
// MarshalJSON formats a pull request the way keepstore wants to see
// it.
func (p Pull) MarshalJSON() ([]byte, error) {
- type KeepstorePullRequest struct {
- Locator string `json:"locator"`
- Servers []string `json:"servers"`
- MountUUID string `json:"mount_uuid"`
- }
- return json.Marshal(KeepstorePullRequest{
- Locator: string(p.SizedDigest[:32]),
+ return json.Marshal(keepstore.PullListItem{
+ Locator: string(p.SizedDigest),
Servers: []string{p.From.URLBase()},
MountUUID: p.To.KeepMount.UUID,
})
@@ -45,13 +41,8 @@ type Trash struct {
// MarshalJSON formats a trash request the way keepstore wants to see
// it, i.e., as a bare locator with no +size hint.
func (t Trash) MarshalJSON() ([]byte, error) {
- type KeepstoreTrashRequest struct {
- Locator string `json:"locator"`
- BlockMtime int64 `json:"block_mtime"`
- MountUUID string `json:"mount_uuid"`
- }
- return json.Marshal(KeepstoreTrashRequest{
- Locator: string(t.SizedDigest[:32]),
+ return json.Marshal(keepstore.TrashListItem{
+ Locator: string(t.SizedDigest),
BlockMtime: t.Mtime,
MountUUID: t.From.KeepMount.UUID,
})
@@ -60,22 +51,35 @@ func (t Trash) MarshalJSON() ([]byte, error) {
// ChangeSet is a set of change requests that will be sent to a
// keepstore server.
type ChangeSet struct {
- Pulls []Pull
- Trashes []Trash
- mutex sync.Mutex
+ PullLimit int
+ TrashLimit int
+
+ Pulls []Pull
+ PullsDeferred int // number that weren't added because of PullLimit
+ Trashes []Trash
+ TrashesDeferred int // number that weren't added because of TrashLimit
+ mutex sync.Mutex
}
// AddPull adds a Pull operation.
func (cs *ChangeSet) AddPull(p Pull) {
cs.mutex.Lock()
- cs.Pulls = append(cs.Pulls, p)
+ if len(cs.Pulls) < cs.PullLimit {
+ cs.Pulls = append(cs.Pulls, p)
+ } else {
+ cs.PullsDeferred++
+ }
cs.mutex.Unlock()
}
// AddTrash adds a Trash operation
func (cs *ChangeSet) AddTrash(t Trash) {
cs.mutex.Lock()
- cs.Trashes = append(cs.Trashes, t)
+ if len(cs.Trashes) < cs.TrashLimit {
+ cs.Trashes = append(cs.Trashes, t)
+ } else {
+ cs.TrashesDeferred++
+ }
cs.mutex.Unlock()
}
@@ -83,5 +87,5 @@ func (cs *ChangeSet) AddTrash(t Trash) {
func (cs *ChangeSet) String() string {
cs.mutex.Lock()
defer cs.mutex.Unlock()
- return fmt.Sprintf("ChangeSet{Pulls:%d, Trashes:%d}", len(cs.Pulls), len(cs.Trashes))
+ return fmt.Sprintf("ChangeSet{Pulls:%d, Trashes:%d} Deferred{Pulls:%d Trashes:%d}", len(cs.Pulls), len(cs.Trashes), cs.PullsDeferred, cs.TrashesDeferred)
}
diff --git a/services/keep-balance/change_set_test.go b/services/keep-balance/change_set_test.go
index 5474d29fb5..f2b9429017 100644
--- a/services/keep-balance/change_set_test.go
+++ b/services/keep-balance/change_set_test.go
@@ -33,12 +33,12 @@ func (s *changeSetSuite) TestJSONFormat(c *check.C) {
To: mnt,
From: srv}})
c.Check(err, check.IsNil)
- c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+ c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
buf, err = json.Marshal([]Trash{{
SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
From: mnt,
Mtime: 123456789}})
c.Check(err, check.IsNil)
- c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+ c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
}
diff --git a/services/keep-balance/collection.go b/services/keep-balance/collection.go
index ccb01bdd10..d7a3fd981d 100644
--- a/services/keep-balance/collection.go
+++ b/services/keep-balance/collection.go
@@ -152,7 +152,7 @@ func (bal *Balancer) updateCollections(ctx context.Context, c *arvados.Client, c
// Use about 1 goroutine per 2 CPUs. Based on experiments with
// a 2-core host, using more concurrent database
// calls/transactions makes this process slower, not faster.
- for i := 0; i < runtime.NumCPU()+1/2; i++ {
+ for i := 0; i < (runtime.NumCPU()+1)/2; i++ {
wg.Add(1)
goSendErr(errs, func() error {
defer wg.Done()
diff --git a/services/keep-balance/integration_test.go b/services/keep-balance/integration_test.go
index 3cfb5cdeda..20d0040b1f 100644
--- a/services/keep-balance/integration_test.go
+++ b/services/keep-balance/integration_test.go
@@ -6,6 +6,7 @@ package keepbalance
import (
"bytes"
+ "context"
"io"
"os"
"strings"
@@ -46,6 +47,7 @@ func (s *integrationSuite) SetUpSuite(c *check.C) {
s.keepClient, err = keepclient.MakeKeepClient(arv)
c.Assert(err, check.IsNil)
+ s.keepClient.DiskCacheSize = keepclient.DiskCacheDisabled
s.putReplicas(c, "foo", 4)
s.putReplicas(c, "bar", 1)
}
@@ -86,8 +88,6 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
logger := logrus.New()
logger.Out = io.MultiWriter(&logBuf, os.Stderr)
opts := RunOptions{
- CommitPulls: true,
- CommitTrash: true,
CommitConfirmedFields: true,
Logger: logger,
}
@@ -97,14 +97,13 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
Logger: logger,
Metrics: newMetrics(prometheus.NewRegistry()),
}
- nextOpts, err := bal.Run(s.client, s.config, opts)
+ nextOpts, err := bal.Run(context.Background(), s.client, s.config, opts)
c.Check(err, check.IsNil)
c.Check(nextOpts.SafeRendezvousState, check.Not(check.Equals), "")
- c.Check(nextOpts.CommitPulls, check.Equals, true)
if iter == 0 {
c.Check(logBuf.String(), check.Matches, `(?ms).*ChangeSet{Pulls:1.*`)
c.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*ChangeSet{.*Trashes:[^0]}*`)
- } else if strings.Contains(logBuf.String(), "ChangeSet{Pulls:0") {
+ } else if !strings.Contains(logBuf.String(), "ChangeSet{Pulls:1") {
break
}
time.Sleep(200 * time.Millisecond)
diff --git a/services/keep-balance/main.go b/services/keep-balance/main.go
index f0b0df5bd3..ec1cb18ee1 100644
--- a/services/keep-balance/main.go
+++ b/services/keep-balance/main.go
@@ -32,12 +32,14 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
flags := flag.NewFlagSet(prog, flag.ContinueOnError)
flags.BoolVar(&options.Once, "once", false,
"balance once and then exit")
- flags.BoolVar(&options.CommitPulls, "commit-pulls", false,
- "send pull requests (make more replicas of blocks that are underreplicated or are not in optimal rendezvous probe order)")
- flags.BoolVar(&options.CommitTrash, "commit-trash", false,
- "send trash requests (delete unreferenced old blocks, and excess replicas of overreplicated blocks)")
+ deprCommitPulls := flags.Bool("commit-pulls", true,
+ "send pull requests (must be true -- configure Collections.BalancePullLimit = 0 to disable.)")
+ deprCommitTrash := flags.Bool("commit-trash", true,
+ "send trash requests (must be true -- configure Collections.BalanceTrashLimit = 0 to disable.)")
flags.BoolVar(&options.CommitConfirmedFields, "commit-confirmed-fields", true,
"update collection fields (replicas_confirmed, storage_classes_confirmed, etc.)")
+ flags.StringVar(&options.ChunkPrefix, "chunk-prefix", "",
+ "operate only on blocks with the given prefix (experimental, see https://dev.arvados.org/issues/19923)")
// These options are implemented by service.Command, so we
// don't need the vars here -- we just need the flags
// to pass flags.Parse().
@@ -53,6 +55,13 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
return code
}
+ if !*deprCommitPulls || !*deprCommitTrash {
+ fmt.Fprint(stderr,
+ "Usage error: the -commit-pulls or -commit-trash command line flags are no longer supported.\n",
+ "Use Collections.BalancePullLimit and Collections.BalanceTrashLimit instead.\n")
+ return cmd.EXIT_INVALIDARGUMENT
+ }
+
// Drop our custom args that would be rejected by the generic
// service.Command
args = nil
@@ -112,7 +121,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
Routes: health.Routes{"ping": srv.CheckHealth},
}
- go srv.run()
+ go srv.run(ctx)
return srv
}).RunCommand(prog, args, stdin, stdout, stderr)
}
diff --git a/services/keep-balance/metrics.go b/services/keep-balance/metrics.go
index 4683b67b98..02cee3955f 100644
--- a/services/keep-balance/metrics.go
+++ b/services/keep-balance/metrics.go
@@ -7,6 +7,7 @@ package keepbalance
import (
"fmt"
"net/http"
+ "strconv"
"sync"
"github.com/prometheus/client_golang/prometheus"
@@ -17,18 +18,20 @@ type observer interface{ Observe(float64) }
type setter interface{ Set(float64) }
type metrics struct {
- reg *prometheus.Registry
- statsGauges map[string]setter
- observers map[string]observer
- setupOnce sync.Once
- mtx sync.Mutex
+ reg *prometheus.Registry
+ statsGauges map[string]setter
+ statsGaugeVecs map[string]*prometheus.GaugeVec
+ observers map[string]observer
+ setupOnce sync.Once
+ mtx sync.Mutex
}
func newMetrics(registry *prometheus.Registry) *metrics {
return &metrics{
- reg: registry,
- statsGauges: map[string]setter{},
- observers: map[string]observer{},
+ reg: registry,
+ statsGauges: map[string]setter{},
+ statsGaugeVecs: map[string]*prometheus.GaugeVec{},
+ observers: map[string]observer{},
}
}
@@ -63,9 +66,24 @@ func (m *metrics) UpdateStats(s balancerStats) {
"transient": {s.unref, "transient (unreferenced, new)"},
"overreplicated": {s.overrep, "overreplicated"},
"underreplicated": {s.underrep, "underreplicated"},
+ "unachievable": {s.unachievable, "unachievable"},
+ "balanced": {s.justright, "optimally balanced"},
+ "desired": {s.desired, "desired"},
"lost": {s.lost, "lost"},
"dedup_byte_ratio": {s.dedupByteRatio(), "deduplication ratio, bytes referenced / bytes stored"},
"dedup_block_ratio": {s.dedupBlockRatio(), "deduplication ratio, blocks referenced / blocks stored"},
+ "collection_bytes": {s.collectionBytes, "total apparent size of all collections"},
+ "referenced_bytes": {s.collectionBlockBytes, "total size of unique referenced blocks"},
+ "reference_count": {s.collectionBlockRefs, "block references in all collections"},
+ "referenced_blocks": {s.collectionBlocks, "blocks referenced by any collection"},
+
+ "pull_entries_sent_count": {s.pulls, "total entries sent in pull lists"},
+ "pull_entries_deferred_count": {s.pullsDeferred, "total entries deferred (not sent) in pull lists"},
+ "trash_entries_sent_count": {s.trashes, "total entries sent in trash lists"},
+ "trash_entries_deferred_count": {s.trashesDeferred, "total entries deferred (not sent) in trash lists"},
+
+ "replicated_block_count": {s.replHistogram, "blocks with indicated number of replicas at last count"},
+ "usage": {s.classStats, "stored in indicated storage class"},
}
m.setupOnce.Do(func() {
// Register gauge(s) for each balancerStats field.
@@ -87,6 +105,29 @@ func (m *metrics) UpdateStats(s balancerStats) {
}
case int, int64, float64:
addGauge(name, gauge.Help)
+ case []int:
+ // replHistogram
+ gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: name,
+ Subsystem: "keep",
+ Help: gauge.Help,
+ }, []string{"replicas"})
+ m.reg.MustRegister(gv)
+ m.statsGaugeVecs[name] = gv
+ case map[string]replicationStats:
+ // classStats
+ for _, sub := range []string{"blocks", "bytes", "replicas"} {
+ name := name + "_" + sub
+ gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Name: name,
+ Subsystem: "keep",
+ Help: gauge.Help,
+ }, []string{"storage_class", "status"})
+ m.reg.MustRegister(gv)
+ m.statsGaugeVecs[name] = gv
+ }
default:
panic(fmt.Sprintf("bad gauge type %T", gauge.Value))
}
@@ -105,6 +146,38 @@ func (m *metrics) UpdateStats(s balancerStats) {
m.statsGauges[name].Set(float64(val))
case float64:
m.statsGauges[name].Set(float64(val))
+ case []int:
+ // replHistogram
+ for r, n := range val {
+ m.statsGaugeVecs[name].WithLabelValues(strconv.Itoa(r)).Set(float64(n))
+ }
+ // Record zero for higher-than-max-replication
+ // metrics, so we don't incorrectly continue
+ // to report stale metrics.
+ //
+ // For example, if we previously reported n=1
+ // for repl=6, but have since restarted
+ // keep-balance and the most replicated block
+ // now has repl=5, then the repl=6 gauge will
+ // still say n=1 until we clear it explicitly
+ // here.
+ for r := len(val); r < len(val)+4 || r < len(val)*2; r++ {
+ m.statsGaugeVecs[name].WithLabelValues(strconv.Itoa(r)).Set(0)
+ }
+ case map[string]replicationStats:
+ // classStats
+ for class, cs := range val {
+ for label, val := range map[string]blocksNBytes{
+ "needed": cs.needed,
+ "unneeded": cs.unneeded,
+ "pulling": cs.pulling,
+ "unachievable": cs.unachievable,
+ } {
+ m.statsGaugeVecs[name+"_blocks"].WithLabelValues(class, label).Set(float64(val.blocks))
+ m.statsGaugeVecs[name+"_bytes"].WithLabelValues(class, label).Set(float64(val.bytes))
+ m.statsGaugeVecs[name+"_replicas"].WithLabelValues(class, label).Set(float64(val.replicas))
+ }
+ }
default:
panic(fmt.Sprintf("bad gauge type %T", gauge.Value))
}
diff --git a/services/keep-balance/server.go b/services/keep-balance/server.go
index e485f5b206..7a59c1e8c0 100644
--- a/services/keep-balance/server.go
+++ b/services/keep-balance/server.go
@@ -5,12 +5,14 @@
package keepbalance
import (
+ "context"
"net/http"
"os"
"os/signal"
"syscall"
"time"
+ "git.arvados.org/arvados.git/lib/controller/dblock"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
@@ -25,9 +27,8 @@ import (
// RunOptions fields are controlled by command line flags.
type RunOptions struct {
Once bool
- CommitPulls bool
- CommitTrash bool
CommitConfirmedFields bool
+ ChunkPrefix string
Logger logrus.FieldLogger
Dumper logrus.FieldLogger
@@ -62,12 +63,12 @@ func (srv *Server) Done() <-chan struct{} {
return nil
}
-func (srv *Server) run() {
+func (srv *Server) run(ctx context.Context) {
var err error
if srv.RunOptions.Once {
- _, err = srv.runOnce()
+ _, err = srv.runOnce(ctx)
} else {
- err = srv.runForever(nil)
+ err = srv.runForever(ctx)
}
if err != nil {
srv.Logger.Error(err)
@@ -77,40 +78,47 @@ func (srv *Server) run() {
}
}
-func (srv *Server) runOnce() (*Balancer, error) {
+func (srv *Server) runOnce(ctx context.Context) (*Balancer, error) {
bal := &Balancer{
DB: srv.DB,
Logger: srv.Logger,
Dumper: srv.Dumper,
Metrics: srv.Metrics,
LostBlocksFile: srv.Cluster.Collections.BlobMissingReport,
+ ChunkPrefix: srv.RunOptions.ChunkPrefix,
}
var err error
- srv.RunOptions, err = bal.Run(srv.ArvClient, srv.Cluster, srv.RunOptions)
+ srv.RunOptions, err = bal.Run(ctx, srv.ArvClient, srv.Cluster, srv.RunOptions)
return bal, err
}
-// RunForever runs forever, or (for testing purposes) until the given
-// stop channel is ready to receive.
-func (srv *Server) runForever(stop <-chan interface{}) error {
+// RunForever runs forever, or until ctx is cancelled.
+func (srv *Server) runForever(ctx context.Context) error {
logger := srv.Logger
ticker := time.NewTicker(time.Duration(srv.Cluster.Collections.BalancePeriod))
- // The unbuffered channel here means we only hear SIGUSR1 if
- // it arrives while we're waiting in select{}.
- sigUSR1 := make(chan os.Signal)
+ sigUSR1 := make(chan os.Signal, 1)
signal.Notify(sigUSR1, syscall.SIGUSR1)
+ defer signal.Stop(sigUSR1)
+
+ logger.Info("acquiring service lock")
+ dblock.KeepBalanceService.Lock(ctx, func(context.Context) (*sqlx.DB, error) { return srv.DB, nil })
+ defer dblock.KeepBalanceService.Unlock()
logger.Printf("starting up: will scan every %v and on SIGUSR1", srv.Cluster.Collections.BalancePeriod)
for {
- if !srv.RunOptions.CommitPulls && !srv.RunOptions.CommitTrash {
+ if srv.Cluster.Collections.BalancePullLimit < 1 && srv.Cluster.Collections.BalanceTrashLimit < 1 {
logger.Print("WARNING: Will scan periodically, but no changes will be committed.")
- logger.Print("======= Consider using -commit-pulls and -commit-trash flags.")
+ logger.Print("======= To commit changes, set BalancePullLimit and BalanceTrashLimit values greater than zero.")
}
- _, err := srv.runOnce()
+ if !dblock.KeepBalanceService.Check() {
+ // context canceled
+ return nil
+ }
+ _, err := srv.runOnce(ctx)
if err != nil {
logger.Print("run failed: ", err)
} else {
@@ -118,8 +126,7 @@ func (srv *Server) runForever(stop <-chan interface{}) error {
}
select {
- case <-stop:
- signal.Stop(sigUSR1)
+ case <-ctx.Done():
return nil
case <-ticker.C:
logger.Print("timer went off")
@@ -128,8 +135,7 @@ func (srv *Server) runForever(stop <-chan interface{}) error {
// Reset the timer so we don't start the N+1st
// run too soon after the Nth run is triggered
// by SIGUSR1.
- ticker.Stop()
- ticker = time.NewTicker(time.Duration(srv.Cluster.Collections.BalancePeriod))
+ ticker.Reset(time.Duration(srv.Cluster.Collections.BalancePeriod))
}
logger.Print("starting next run")
}
diff --git a/services/keep-web/cache.go b/services/keep-web/cache.go
index d5fdc4997e..d443bc0829 100644
--- a/services/keep-web/cache.go
+++ b/services/keep-web/cache.go
@@ -5,14 +5,15 @@
package keepweb
import (
+ "errors"
+ "net/http"
+ "sort"
"sync"
- "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/keepclient"
- lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
@@ -20,74 +21,33 @@ import (
const metricsUpdateInterval = time.Second / 10
type cache struct {
- cluster *arvados.Cluster
- logger logrus.FieldLogger
- registry *prometheus.Registry
- metrics cacheMetrics
- pdhs *lru.TwoQueueCache
- collections *lru.TwoQueueCache
- sessions *lru.TwoQueueCache
- setupOnce sync.Once
-
- chPruneSessions chan struct{}
- chPruneCollections chan struct{}
+ cluster *arvados.Cluster
+ logger logrus.FieldLogger
+ registry *prometheus.Registry
+ metrics cacheMetrics
+ sessions map[string]*cachedSession
+ setupOnce sync.Once
+ mtx sync.Mutex
+
+ chPruneSessions chan struct{}
}
type cacheMetrics struct {
- requests prometheus.Counter
- collectionBytes prometheus.Gauge
- collectionEntries prometheus.Gauge
- sessionEntries prometheus.Gauge
- collectionHits prometheus.Counter
- pdhHits prometheus.Counter
- sessionHits prometheus.Counter
- sessionMisses prometheus.Counter
- apiCalls prometheus.Counter
+ requests prometheus.Counter
+ collectionBytes prometheus.Gauge
+ sessionEntries prometheus.Gauge
+ sessionHits prometheus.Counter
+ sessionMisses prometheus.Counter
}
func (m *cacheMetrics) setup(reg *prometheus.Registry) {
- m.requests = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "arvados",
- Subsystem: "keepweb_collectioncache",
- Name: "requests",
- Help: "Number of targetID-to-manifest lookups handled.",
- })
- reg.MustRegister(m.requests)
- m.collectionHits = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "arvados",
- Subsystem: "keepweb_collectioncache",
- Name: "hits",
- Help: "Number of pdh-to-manifest cache hits.",
- })
- reg.MustRegister(m.collectionHits)
- m.pdhHits = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "arvados",
- Subsystem: "keepweb_collectioncache",
- Name: "pdh_hits",
- Help: "Number of uuid-to-pdh cache hits.",
- })
- reg.MustRegister(m.pdhHits)
- m.apiCalls = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "arvados",
- Subsystem: "keepweb_collectioncache",
- Name: "api_calls",
- Help: "Number of outgoing API calls made by cache.",
- })
- reg.MustRegister(m.apiCalls)
m.collectionBytes = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "arvados",
Subsystem: "keepweb_sessions",
- Name: "cached_collection_bytes",
- Help: "Total size of all cached manifests and sessions.",
+ Name: "cached_session_bytes",
+ Help: "Total size of all cached sessions.",
})
reg.MustRegister(m.collectionBytes)
- m.collectionEntries = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepweb_collectioncache",
- Name: "cached_manifests",
- Help: "Number of manifests in cache.",
- })
- reg.MustRegister(m.collectionEntries)
m.sessionEntries = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "arvados",
Subsystem: "keepweb_sessions",
@@ -111,41 +71,70 @@ func (m *cacheMetrics) setup(reg *prometheus.Registry) {
reg.MustRegister(m.sessionMisses)
}
-type cachedPDH struct {
- expire time.Time
- refresh time.Time
- pdh string
-}
-
-type cachedCollection struct {
- expire time.Time
- collection *arvados.Collection
-}
-
-type cachedPermission struct {
- expire time.Time
-}
-
type cachedSession struct {
+ cache *cache
expire time.Time
- fs atomic.Value
client *arvados.Client
arvadosclient *arvadosclient.ArvadosClient
keepclient *keepclient.KeepClient
- user atomic.Value
+
+ // Each session uses a system of three mutexes (plus the
+ // cache-wide mutex) to enable the following semantics:
+ //
+ // - There are never multiple sessions in use for a given
+ // token.
+ //
+ // - If the cached in-memory filesystems/user records are
+ // older than the configured cache TTL when a request starts,
+ // the request will use new ones.
+ //
+ // - Unused sessions are garbage-collected.
+ //
+ // In particular, when it is necessary to reset a session's
+ // filesystem/user record (to save memory or respect the
+ // configured cache TTL), any operations that are already
+ // using the existing filesystem/user record are allowed to
+ // finish before the new filesystem is constructed.
+ //
+ // The locks must be acquired in the following order:
+ // cache.mtx, session.mtx, session.refresh, session.inuse.
+
+ // mtx is RLocked while session is not safe to evict from
+ // cache -- i.e., a checkout() has decided to use it, and its
+ // caller is not finished with it. When locking or rlocking
+ // this mtx, the cache mtx MUST already be held.
+ //
+ // This mutex enables pruneSessions to detect when it is safe
+ // to completely remove the session entry from the cache.
+ mtx sync.RWMutex
+ // refresh must be locked in order to read or write the
+ // fs/user/userLoaded/lastuse fields. This mutex enables
+ // GetSession and pruneSessions to remove/replace fs and user
+ // values safely.
+ refresh sync.Mutex
+ // inuse must be RLocked while the session is in use by a
+ // caller. This mutex enables pruneSessions() to wait for all
+ // existing usage to finish by calling inuse.Lock().
+ inuse sync.RWMutex
+
+ fs arvados.CustomFileSystem
+ user arvados.User
+ userLoaded bool
+ lastuse time.Time
+}
+
+func (sess *cachedSession) Release() {
+ sess.inuse.RUnlock()
+ sess.mtx.RUnlock()
+ select {
+ case sess.cache.chPruneSessions <- struct{}{}:
+ default:
+ }
}
func (c *cache) setup() {
var err error
- c.pdhs, err = lru.New2Q(c.cluster.Collections.WebDAVCache.MaxUUIDEntries)
- if err != nil {
- panic(err)
- }
- c.collections, err = lru.New2Q(c.cluster.Collections.WebDAVCache.MaxCollectionEntries)
- if err != nil {
- panic(err)
- }
- c.sessions, err = lru.New2Q(c.cluster.Collections.WebDAVCache.MaxSessions)
+ c.sessions = map[string]*cachedSession{}
if err != nil {
panic(err)
}
@@ -160,12 +149,6 @@ func (c *cache) setup() {
c.updateGauges()
}
}()
- c.chPruneCollections = make(chan struct{}, 1)
- go func() {
- for range c.chPruneCollections {
- c.pruneCollections()
- }
- }()
c.chPruneSessions = make(chan struct{}, 1)
go func() {
for range c.chPruneSessions {
@@ -175,337 +158,232 @@ func (c *cache) setup() {
}
func (c *cache) updateGauges() {
- c.metrics.collectionBytes.Set(float64(c.collectionBytes()))
- c.metrics.collectionEntries.Set(float64(c.collections.Len()))
- c.metrics.sessionEntries.Set(float64(c.sessions.Len()))
+ n, size := c.sessionsSize()
+ c.metrics.collectionBytes.Set(float64(size))
+ c.metrics.sessionEntries.Set(float64(n))
}
var selectPDH = map[string]interface{}{
"select": []string{"portable_data_hash"},
}
-// Update saves a modified version (fs) to an existing collection
-// (coll) and, if successful, updates the relevant cache entries so
-// subsequent calls to Get() reflect the modifications.
-func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvados.CollectionFileSystem) error {
- c.setupOnce.Do(c.setup)
-
- m, err := fs.MarshalManifest(".")
- if err != nil || m == coll.ManifestText {
- return err
- }
- coll.ManifestText = m
- var updated arvados.Collection
- err = client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
- "collection": map[string]string{
- "manifest_text": coll.ManifestText,
- },
- })
- if err != nil {
- c.pdhs.Remove(coll.UUID)
- return err
- }
- c.collections.Add(client.AuthToken+"\000"+updated.PortableDataHash, &cachedCollection{
- expire: time.Now().Add(time.Duration(c.cluster.Collections.WebDAVCache.TTL)),
- collection: &updated,
- })
- c.pdhs.Add(coll.UUID, &cachedPDH{
- expire: time.Now().Add(time.Duration(c.cluster.Collections.WebDAVCache.TTL)),
- refresh: time.Now().Add(time.Duration(c.cluster.Collections.WebDAVCache.UUIDTTL)),
- pdh: updated.PortableDataHash,
- })
- return nil
-}
-
-// ResetSession unloads any potentially stale state. Should be called
-// after write operations, so subsequent reads don't return stale
-// data.
-func (c *cache) ResetSession(token string) {
- c.setupOnce.Do(c.setup)
- c.sessions.Remove(token)
-}
-
-// Get a long-lived CustomFileSystem suitable for doing a read operation
-// with the given token.
-func (c *cache) GetSession(token string) (arvados.CustomFileSystem, *cachedSession, error) {
+func (c *cache) checkout(token string) (*cachedSession, error) {
c.setupOnce.Do(c.setup)
- now := time.Now()
- ent, _ := c.sessions.Get(token)
- sess, _ := ent.(*cachedSession)
- expired := false
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ sess := c.sessions[token]
if sess == nil {
- c.metrics.sessionMisses.Inc()
- sess = &cachedSession{
- expire: now.Add(c.cluster.Collections.WebDAVCache.TTL.Duration()),
- }
- var err error
- sess.client, err = arvados.NewClientFromConfig(c.cluster)
+ client, err := arvados.NewClientFromConfig(c.cluster)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- sess.client.AuthToken = token
- sess.arvadosclient, err = arvadosclient.New(sess.client)
+ client.AuthToken = token
+ client.Timeout = time.Minute
+ // A non-empty origin header tells controller to
+ // prioritize our traffic as interactive, which is
+ // true most of the time.
+ origin := c.cluster.Services.WebDAVDownload.ExternalURL
+ client.SendHeader = http.Header{"Origin": {origin.Scheme + "://" + origin.Host}}
+ arvadosclient, err := arvadosclient.New(client)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- sess.keepclient = keepclient.New(sess.arvadosclient)
- c.sessions.Add(token, sess)
- } else if sess.expire.Before(now) {
- c.metrics.sessionMisses.Inc()
- expired = true
- } else {
- c.metrics.sessionHits.Inc()
- }
- select {
- case c.chPruneSessions <- struct{}{}:
- default:
- }
- fs, _ := sess.fs.Load().(arvados.CustomFileSystem)
- if fs != nil && !expired {
- return fs, sess, nil
+ sess = &cachedSession{
+ cache: c,
+ client: client,
+ arvadosclient: arvadosclient,
+ keepclient: keepclient.New(arvadosclient),
+ }
+ c.sessions[token] = sess
}
- fs = sess.client.SiteFileSystem(sess.keepclient)
- fs.ForwardSlashNameSubstitution(c.cluster.Collections.ForwardSlashNameSubstitution)
- sess.fs.Store(fs)
- return fs, sess, nil
+ sess.mtx.RLock()
+ return sess, nil
}
-// Remove all expired session cache entries, then remove more entries
-// until approximate remaining size <= maxsize/2
-func (c *cache) pruneSessions() {
- now := time.Now()
- var size int64
- keys := c.sessions.Keys()
- for _, token := range keys {
- ent, ok := c.sessions.Peek(token)
- if !ok {
- continue
- }
- s := ent.(*cachedSession)
- if s.expire.Before(now) {
- c.sessions.Remove(token)
- continue
- }
- if fs, ok := s.fs.Load().(arvados.CustomFileSystem); ok {
- size += fs.MemorySize()
- }
+// Get a long-lived CustomFileSystem suitable for doing a read or
+// write operation with the given token.
+//
+// If the returned error is nil, the caller must call Release() on the
+// returned session when finished using it.
+func (c *cache) GetSession(token string) (arvados.CustomFileSystem, *cachedSession, *arvados.User, error) {
+ sess, err := c.checkout(token)
+ if err != nil {
+ return nil, nil, nil, err
}
- // Remove tokens until reaching size limit, starting with the
- // least frequently used entries (which Keys() returns last).
- for i := len(keys) - 1; i >= 0; i-- {
- token := keys[i]
- if size <= c.cluster.Collections.WebDAVCache.MaxCollectionBytes/2 {
- break
- }
- ent, ok := c.sessions.Peek(token)
- if !ok {
- continue
- }
- s := ent.(*cachedSession)
- fs, _ := s.fs.Load().(arvados.CustomFileSystem)
- if fs == nil {
- continue
+ sess.refresh.Lock()
+ defer sess.refresh.Unlock()
+ now := time.Now()
+ sess.lastuse = now
+ refresh := sess.expire.Before(now)
+ if sess.fs == nil || !sess.userLoaded || refresh {
+ // Wait for all active users to finish (otherwise they
+ // might make changes to an old fs after we start
+ // using the new fs).
+ sess.inuse.Lock()
+ if !sess.userLoaded || refresh {
+ err := sess.client.RequestAndDecode(&sess.user, "GET", "arvados/v1/users/current", nil, nil)
+ if he := errorWithHTTPStatus(nil); errors.As(err, &he) && he.HTTPStatus() == http.StatusForbidden {
+ // token is OK, but "get user id" api is out
+ // of scope -- use existing/expired info if
+ // any, or leave empty for unknown user
+ } else if err != nil {
+ sess.inuse.Unlock()
+ sess.mtx.RUnlock()
+ return nil, nil, nil, err
+ }
+ sess.userLoaded = true
}
- c.sessions.Remove(token)
- size -= fs.MemorySize()
- }
-}
-
-func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceReload bool) (*arvados.Collection, error) {
- c.setupOnce.Do(c.setup)
- c.metrics.requests.Inc()
- var pdhRefresh bool
- var pdh string
- if arvadosclient.PDHMatch(targetID) {
- pdh = targetID
- } else if ent, cached := c.pdhs.Get(targetID); cached {
- ent := ent.(*cachedPDH)
- if ent.expire.Before(time.Now()) {
- c.pdhs.Remove(targetID)
+ if sess.fs == nil || refresh {
+ sess.fs = sess.client.SiteFileSystem(sess.keepclient)
+ sess.fs.ForwardSlashNameSubstitution(c.cluster.Collections.ForwardSlashNameSubstitution)
+ sess.expire = now.Add(c.cluster.Collections.WebDAVCache.TTL.Duration())
+ c.metrics.sessionMisses.Inc()
} else {
- pdh = ent.pdh
- pdhRefresh = forceReload || time.Now().After(ent.refresh)
- c.metrics.pdhHits.Inc()
+ c.metrics.sessionHits.Inc()
}
- }
-
- if pdh == "" {
- // UUID->PDH mapping is not cached, might as well get
- // the whole collection record and be done (below).
- c.logger.Debugf("cache(%s): have no pdh", targetID)
- } else if cached := c.lookupCollection(arv.ApiToken + "\000" + pdh); cached == nil {
- // PDH->manifest is not cached, might as well get the
- // whole collection record (below).
- c.logger.Debugf("cache(%s): have pdh %s but manifest is not cached", targetID, pdh)
- } else if !pdhRefresh {
- // We looked up UUID->PDH very recently, and we still
- // have the manifest for that PDH.
- c.logger.Debugf("cache(%s): have pdh %s and refresh not needed", targetID, pdh)
- return cached, nil
+ sess.inuse.Unlock()
} else {
- // Get current PDH for this UUID (and confirm we still
- // have read permission). Most likely, the cached PDH
- // is still correct, in which case we can use our
- // cached manifest.
- c.metrics.apiCalls.Inc()
- var current arvados.Collection
- err := arv.Get("collections", targetID, selectPDH, ¤t)
- if err != nil {
- return nil, err
- }
- if current.PortableDataHash == pdh {
- // PDH has not changed, cached manifest is
- // correct.
- c.logger.Debugf("cache(%s): verified cached pdh %s is still correct", targetID, pdh)
- return cached, nil
- }
- if cached := c.lookupCollection(arv.ApiToken + "\000" + current.PortableDataHash); cached != nil {
- // PDH changed, and we already have the
- // manifest for that new PDH.
- c.logger.Debugf("cache(%s): cached pdh %s was stale, new pdh is %s and manifest is already in cache", targetID, pdh, current.PortableDataHash)
- return cached, nil
- }
+ c.metrics.sessionHits.Inc()
}
+ sess.inuse.RLock()
+ return sess.fs, sess, &sess.user, nil
+}
- // Either UUID->PDH is not cached, or PDH->manifest is not
- // cached.
- var retrieved arvados.Collection
- c.metrics.apiCalls.Inc()
- err := arv.Get("collections", targetID, nil, &retrieved)
- if err != nil {
- return nil, err
- }
- c.logger.Debugf("cache(%s): retrieved manifest, caching with pdh %s", targetID, retrieved.PortableDataHash)
- exp := time.Now().Add(time.Duration(c.cluster.Collections.WebDAVCache.TTL))
- if targetID != retrieved.PortableDataHash {
- c.pdhs.Add(targetID, &cachedPDH{
- expire: exp,
- refresh: time.Now().Add(time.Duration(c.cluster.Collections.WebDAVCache.UUIDTTL)),
- pdh: retrieved.PortableDataHash,
+type sessionSnapshot struct {
+ token string
+ sess *cachedSession
+ lastuse time.Time
+ fs arvados.CustomFileSystem
+ size int64
+ prune bool
+}
+
+// Remove all expired idle session cache entries, and remove in-memory
+// filesystems until approximate remaining size <= maxsize
+func (c *cache) pruneSessions() {
+ now := time.Now()
+ c.mtx.Lock()
+ snaps := make([]sessionSnapshot, 0, len(c.sessions))
+ for token, sess := range c.sessions {
+ snaps = append(snaps, sessionSnapshot{
+ token: token,
+ sess: sess,
})
}
- c.collections.Add(arv.ApiToken+"\000"+retrieved.PortableDataHash, &cachedCollection{
- expire: exp,
- collection: &retrieved,
- })
- if int64(len(retrieved.ManifestText)) > c.cluster.Collections.WebDAVCache.MaxCollectionBytes/int64(c.cluster.Collections.WebDAVCache.MaxCollectionEntries) {
- select {
- case c.chPruneCollections <- struct{}{}:
- default:
- }
+ c.mtx.Unlock()
+
+ // Load lastuse/fs/expire data from sessions. Note we do this
+ // after unlocking c.mtx because sess.refresh.Lock sometimes
+ // waits for another goroutine to finish "[re]fetch user
+ // record".
+ for i := range snaps {
+ snaps[i].sess.refresh.Lock()
+ snaps[i].lastuse = snaps[i].sess.lastuse
+ snaps[i].fs = snaps[i].sess.fs
+ snaps[i].prune = snaps[i].sess.expire.Before(now)
+ snaps[i].sess.refresh.Unlock()
}
- return &retrieved, nil
-}
-// pruneCollections checks the total bytes occupied by manifest_text
-// in the collection cache and removes old entries as needed to bring
-// the total size down to CollectionBytes. It also deletes all expired
-// entries.
-//
-// pruneCollections does not aim to be perfectly correct when there is
-// concurrent cache activity.
-func (c *cache) pruneCollections() {
+ // Sort sessions with oldest first.
+ sort.Slice(snaps, func(i, j int) bool {
+ return snaps[i].lastuse.Before(snaps[j].lastuse)
+ })
+
+ // Add up size of sessions that aren't already marked for
+ // pruning based on expire time.
var size int64
- now := time.Now()
- keys := c.collections.Keys()
- entsize := make([]int, len(keys))
- expired := make([]bool, len(keys))
- for i, k := range keys {
- v, ok := c.collections.Peek(k)
- if !ok {
- continue
+ for i, snap := range snaps {
+ if !snap.prune && snap.fs != nil {
+ size := snap.fs.MemorySize()
+ snaps[i].size = size
+ size += size
}
- ent := v.(*cachedCollection)
- n := len(ent.collection.ManifestText)
- size += int64(n)
- entsize[i] = n
- expired[i] = ent.expire.Before(now)
}
- for i, k := range keys {
- if expired[i] {
- c.collections.Remove(k)
- size -= int64(entsize[i])
- }
- }
- for i, k := range keys {
- if size <= c.cluster.Collections.WebDAVCache.MaxCollectionBytes/2 {
+ // Mark more sessions for deletion until reaching desired
+ // memory size limit, starting with the oldest entries.
+ for i, snap := range snaps {
+ if size <= int64(c.cluster.Collections.WebDAVCache.MaxCollectionBytes) {
break
}
- if expired[i] {
- // already removed this entry in the previous loop
+ if snap.prune {
continue
}
- c.collections.Remove(k)
- size -= int64(entsize[i])
+ snaps[i].prune = true
+ size -= snap.size
}
-}
-// collectionBytes returns the approximate combined memory size of the
-// collection cache and session filesystem cache.
-func (c *cache) collectionBytes() uint64 {
- var size uint64
- for _, k := range c.collections.Keys() {
- v, ok := c.collections.Peek(k)
- if !ok {
- continue
+ // Mark more sessions for deletion until reaching desired
+ // session count limit.
+ mustprune := len(snaps) - c.cluster.Collections.WebDAVCache.MaxSessions
+ for i := range snaps {
+ if snaps[i].prune {
+ mustprune--
}
- size += uint64(len(v.(*cachedCollection).collection.ManifestText))
}
- for _, token := range c.sessions.Keys() {
- ent, ok := c.sessions.Peek(token)
- if !ok {
- continue
- }
- if fs, ok := ent.(*cachedSession).fs.Load().(arvados.CustomFileSystem); ok {
- size += uint64(fs.MemorySize())
+ for i := range snaps {
+ if mustprune < 1 {
+ break
+ } else if !snaps[i].prune {
+ snaps[i].prune = true
+ mustprune--
}
}
- return size
-}
-func (c *cache) lookupCollection(key string) *arvados.Collection {
- e, cached := c.collections.Get(key)
- if !cached {
- return nil
- }
- ent := e.(*cachedCollection)
- if ent.expire.Before(time.Now()) {
- c.collections.Remove(key)
- return nil
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ for _, snap := range snaps {
+ if !snap.prune {
+ continue
+ }
+ sess := snap.sess
+ if sess.mtx.TryLock() {
+ delete(c.sessions, snap.token)
+ continue
+ }
+ // We can't remove a session that's been checked out
+ // -- that would allow another session to be created
+ // for the same token using a different in-memory
+ // filesystem. Instead, we wait for active requests to
+ // finish and then "unload" it. After this, either the
+ // next GetSession will reload fs/user, or a
+ // subsequent pruneSessions will remove the session.
+ go func() {
+ // Ensure nobody is mid-GetSession() (note we
+ // already know nobody is mid-checkout()
+ // because we have c.mtx locked)
+ sess.refresh.Lock()
+ defer sess.refresh.Unlock()
+ // Wait for current usage to finish (i.e.,
+ // anyone who has decided to use the current
+ // values of sess.fs and sess.user, and hasn't
+ // called Release() yet)
+ sess.inuse.Lock()
+ defer sess.inuse.Unlock()
+ // Release memory
+ sess.fs = nil
+ // Next GetSession will make a new fs
+ }()
}
- c.metrics.collectionHits.Inc()
- return ent.collection
}
-func (c *cache) GetTokenUser(token string) (*arvados.User, error) {
- // Get and cache user record associated with this
- // token. We need to know their UUID for logging, and
- // whether they are an admin or not for certain
- // permission checks.
-
- // Get/create session entry
- _, sess, err := c.GetSession(token)
- if err != nil {
- return nil, err
+// sessionsSize returns the number and approximate total memory size
+// of all cached sessions.
+func (c *cache) sessionsSize() (n int, size int64) {
+ c.mtx.Lock()
+ n = len(c.sessions)
+ sessions := make([]*cachedSession, 0, n)
+ for _, sess := range c.sessions {
+ sessions = append(sessions, sess)
}
-
- // See if the user is already set, and if so, return it
- user, _ := sess.user.Load().(*arvados.User)
- if user != nil {
- return user, nil
- }
-
- // Fetch the user record
- c.metrics.apiCalls.Inc()
- var current arvados.User
-
- err = sess.client.RequestAndDecode(¤t, "GET", "/arvados/v1/users/current", nil, nil)
- if err != nil {
- return nil, err
+ c.mtx.Unlock()
+ for _, sess := range sessions {
+ sess.refresh.Lock()
+ fs := sess.fs
+ sess.refresh.Unlock()
+ if fs != nil {
+ size += fs.MemorySize()
+ }
}
-
- // Stash the user record for next time
- sess.user.Store(¤t)
- return ¤t, nil
+ return
}
diff --git a/services/keep-web/cache_test.go b/services/keep-web/cache_test.go
index 6b8f427171..e95ebcf846 100644
--- a/services/keep-web/cache_test.go
+++ b/services/keep-web/cache_test.go
@@ -5,151 +5,153 @@
package keepweb
import (
- "bytes"
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "strings"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/common/expfmt"
"gopkg.in/check.v1"
)
-func (s *UnitSuite) checkCacheMetrics(c *check.C, reg *prometheus.Registry, regs ...string) {
- mfs, err := reg.Gather()
- c.Check(err, check.IsNil)
- buf := &bytes.Buffer{}
- enc := expfmt.NewEncoder(buf, expfmt.FmtText)
- for _, mf := range mfs {
- c.Check(enc.Encode(mf), check.IsNil)
- }
- mm := buf.String()
+func (s *IntegrationSuite) checkCacheMetrics(c *check.C, regs ...string) {
+ s.handler.Cache.updateGauges()
+ mm := arvadostest.GatherMetricsAsString(s.handler.Cache.registry)
+ // Remove comments to make the "value vs. regexp" failure
+ // output easier to read.
+ mm = regexp.MustCompile(`(?m)^#.*\n`).ReplaceAllString(mm, "")
for _, reg := range regs {
- c.Check(mm, check.Matches, `(?ms).*collectioncache_`+reg+`\n.*`)
+ c.Check(mm, check.Matches, `(?ms).*keepweb_sessions_`+reg+`\n.*`)
}
}
-func (s *UnitSuite) TestCache(c *check.C) {
- arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, check.Equals, nil)
-
- cache := &cache{
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- registry: prometheus.NewRegistry(),
- }
-
+func (s *IntegrationSuite) TestCache(c *check.C) {
// Hit the same collection 5 times using the same token. Only
// the first req should cause an API call; the next 4 should
// hit all caches.
- arv.ApiToken = arvadostest.AdminToken
- var coll *arvados.Collection
+ u := mustParseURL("http://" + arvadostest.FooCollection + ".keep-web.example/foo")
+ req := &http.Request{
+ Method: "GET",
+ Host: u.Host,
+ URL: u,
+ RequestURI: u.RequestURI(),
+ Header: http.Header{
+ "Authorization": {"Bearer " + arvadostest.ActiveToken},
+ },
+ }
for i := 0; i < 5; i++ {
- coll, err = cache.Get(arv, arvadostest.FooCollection, false)
- c.Check(err, check.Equals, nil)
- c.Assert(coll, check.NotNil)
- c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
- c.Check(coll.ManifestText[:2], check.Equals, ". ")
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
}
- s.checkCacheMetrics(c, cache.registry,
- "requests 5",
+ s.checkCacheMetrics(c,
"hits 4",
- "pdh_hits 4",
- "api_calls 1")
-
- // Hit the same collection 2 more times, this time requesting
- // it by PDH and using a different token. The first req should
- // miss the permission cache and fetch the new manifest; the
- // second should hit the Collection cache and skip the API
- // lookup.
- arv.ApiToken = arvadostest.ActiveToken
-
- coll2, err := cache.Get(arv, arvadostest.FooCollectionPDH, false)
- c.Check(err, check.Equals, nil)
- c.Assert(coll2, check.NotNil)
- c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
- c.Check(coll2.ManifestText[:2], check.Equals, ". ")
- c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
-
- s.checkCacheMetrics(c, cache.registry,
- "requests 6",
- "hits 4",
- "pdh_hits 4",
- "api_calls 2")
-
- coll2, err = cache.Get(arv, arvadostest.FooCollectionPDH, false)
- c.Check(err, check.Equals, nil)
- c.Assert(coll2, check.NotNil)
- c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooCollectionPDH)
- c.Check(coll2.ManifestText[:2], check.Equals, ". ")
-
- s.checkCacheMetrics(c, cache.registry,
- "requests 7",
- "hits 5",
- "pdh_hits 4",
- "api_calls 2")
-
- // Alternating between two collections N times should produce
- // only 2 more API calls.
- arv.ApiToken = arvadostest.AdminToken
- for i := 0; i < 20; i++ {
- var target string
- if i%2 == 0 {
- target = arvadostest.HelloWorldCollection
- } else {
- target = arvadostest.FooBarDirCollection
- }
- _, err := cache.Get(arv, target, false)
- c.Check(err, check.Equals, nil)
+ "misses 1",
+ "active 1")
+
+ // Hit a shared collection 3 times using PDH, using a
+ // different token.
+ u2 := mustParseURL("http://" + strings.Replace(arvadostest.BarFileCollectionPDH, "+", "-", 1) + ".keep-web.example/bar")
+ req2 := &http.Request{
+ Method: "GET",
+ Host: u2.Host,
+ URL: u2,
+ RequestURI: u2.RequestURI(),
+ Header: http.Header{
+ "Authorization": {"Bearer " + arvadostest.SpectatorToken},
+ },
}
- s.checkCacheMetrics(c, cache.registry,
- "requests 27",
- "hits 23",
- "pdh_hits 22",
- "api_calls 4")
-}
-
-func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
- arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, check.Equals, nil)
-
- cache := &cache{
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- registry: prometheus.NewRegistry(),
+ for i := 0; i < 3; i++ {
+ resp2 := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp2, req2)
+ c.Check(resp2.Code, check.Equals, http.StatusOK)
}
-
- for _, forceReload := range []bool{false, true, false, true} {
- _, err := cache.Get(arv, arvadostest.FooCollectionPDH, forceReload)
- c.Check(err, check.Equals, nil)
+ s.checkCacheMetrics(c,
+ "hits 6",
+ "misses 2",
+ "active 2")
+
+ // Alternating between two collections/tokens N times should
+ // use the existing sessions.
+ for i := 0; i < 7; i++ {
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+
+ resp2 := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp2, req2)
+ c.Check(resp2.Code, check.Equals, http.StatusOK)
}
-
- s.checkCacheMetrics(c, cache.registry,
- "requests 4",
- "hits 3",
- "pdh_hits 0",
- "api_calls 1")
+ s.checkCacheMetrics(c,
+ "hits 20",
+ "misses 2",
+ "active 2")
}
-func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
- arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, check.Equals, nil)
-
- cache := &cache{
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- registry: prometheus.NewRegistry(),
- }
-
- for _, forceReload := range []bool{false, true, false, true} {
- _, err := cache.Get(arv, arvadostest.FooCollection, forceReload)
- c.Check(err, check.Equals, nil)
- }
+func (s *IntegrationSuite) TestForceReloadPDH(c *check.C) {
+ filename := strings.Replace(time.Now().Format(time.RFC3339Nano), ":", ".", -1)
+ manifest := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:" + filename + "\n"
+ pdh := arvados.PortableDataHash(manifest)
+ client := arvados.NewClientFromEnv()
+ client.AuthToken = arvadostest.ActiveToken
+
+ _, resp := s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/"+filename, arvadostest.ActiveToken, nil)
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+
+ var coll arvados.Collection
+ err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": manifest,
+ },
+ })
+ c.Assert(err, check.IsNil)
+ defer client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil)
+ c.Assert(coll.PortableDataHash, check.Equals, pdh)
+
+ _, resp = s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/"+filename, "", http.Header{
+ "Authorization": {"Bearer " + arvadostest.ActiveToken},
+ "Cache-Control": {"must-revalidate"},
+ })
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+
+ _, resp = s.do("GET", "http://"+strings.Replace(pdh, "+", "-", 1)+".keep-web.example/missingfile", "", http.Header{
+ "Authorization": {"Bearer " + arvadostest.ActiveToken},
+ "Cache-Control": {"must-revalidate"},
+ })
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+}
- s.checkCacheMetrics(c, cache.registry,
- "requests 4",
- "hits 3",
- "pdh_hits 3",
- "api_calls 3")
+func (s *IntegrationSuite) TestForceReloadUUID(c *check.C) {
+ client := arvados.NewClientFromEnv()
+ client.AuthToken = arvadostest.ActiveToken
+ var coll arvados.Collection
+ err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:oldfile\n",
+ },
+ })
+ c.Assert(err, check.IsNil)
+ defer client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil)
+
+ _, resp := s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/oldfile", arvadostest.ActiveToken, nil)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ err = client.RequestAndDecode(&coll, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
+ "collection": map[string]string{
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:oldfile 0:0:newfile\n",
+ },
+ })
+ c.Assert(err, check.IsNil)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", arvadostest.ActiveToken, nil)
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ _, resp = s.do("GET", "http://"+coll.UUID+".keep-web.example/newfile", "", http.Header{
+ "Authorization": {"Bearer " + arvadostest.ActiveToken},
+ "Cache-Control": {"must-revalidate"},
+ })
+ c.Check(resp.Code, check.Equals, http.StatusOK)
}
diff --git a/services/keep-web/cadaver_test.go b/services/keep-web/cadaver_test.go
index 742140f7f3..026deeb5ee 100644
--- a/services/keep-web/cadaver_test.go
+++ b/services/keep-web/cadaver_test.go
@@ -142,6 +142,11 @@ func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc fun
cmd: "move \"test !%20 file\" testfile\n",
match: `(?ms).*Moving .* succeeded.*`,
},
+ {
+ path: writePath,
+ cmd: "mkcol newdir0/\n",
+ match: `(?ms).*Creating .* succeeded.*`,
+ },
{
path: writePath,
cmd: "move testfile newdir0/\n",
diff --git a/services/keep-web/doc.go b/services/keep-web/doc.go
index d2b4c7eb54..4f7d2ca968 100644
--- a/services/keep-web/doc.go
+++ b/services/keep-web/doc.go
@@ -10,44 +10,44 @@
//
// See http://doc.arvados.org/install/install-keep-web.html.
//
-// Configuration
+// # Configuration
//
// The default cluster configuration file location is
// /etc/arvados/config.yml.
//
// Example configuration file
//
-// Clusters:
-// zzzzz:
-// SystemRootToken: ""
-// Services:
-// Controller:
-// ExternalURL: "https://example.com"
-// Insecure: false
-// WebDAV:
-// InternalURLs:
-// "http://:1234/": {}
-// WebDAVDownload:
-// InternalURLs:
-// "http://:1234/": {}
-// ExternalURL: "https://download.example.com/"
-// Users:
-// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxx"
-// Collections:
-// TrustAllContent: false
-//
-// Starting the server
+// Clusters:
+// zzzzz:
+// SystemRootToken: ""
+// Services:
+// Controller:
+// ExternalURL: "https://example.com"
+// Insecure: false
+// WebDAV:
+// InternalURLs:
+// "http://:1234/": {}
+// WebDAVDownload:
+// InternalURLs:
+// "http://:1234/": {}
+// ExternalURL: "https://download.example.com/"
+// Users:
+// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxx"
+// Collections:
+// TrustAllContent: false
+//
+// # Starting the server
//
// Start a server using the default config file
// /etc/arvados/config.yml:
//
-// keep-web
+// keep-web
//
// Start a server using the config file /path/to/config.yml:
//
-// keep-web -config /path/to/config.yml
+// keep-web -config /path/to/config.yml
//
-// Proxy configuration
+// # Proxy configuration
//
// Typically, keep-web is installed behind a proxy like nginx.
//
@@ -74,25 +74,25 @@
// proxy. However, if TLS is not used between nginx and keep-web, the
// intervening networks must be secured by other means.
//
-// Anonymous downloads
+// # Anonymous downloads
//
// The "Users.AnonymousUserToken" configuration entry used when
// when processing anonymous requests, i.e., whenever a web client
// does not supply its own Arvados API token via path, query string,
// cookie, or request header.
//
-// Clusters:
-// zzzzz:
-// Users:
-// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxxxxx"
+// Clusters:
+// zzzzz:
+// Users:
+// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxxxxx"
//
// See http://doc.arvados.org/install/install-keep-web.html for examples.
//
-// Download URLs
+// # Download URLs
//
// See http://doc.arvados.org/api/keep-web-urls.html
//
-// Attachment-Only host
+// # Attachment-Only host
//
// It is possible to serve untrusted content and accept user
// credentials at the same origin as long as the content is only
@@ -103,13 +103,13 @@
// only when the designated origin matches exactly the Host header
// provided by the client or downstream proxy.
//
-// Clusters:
-// zzzzz:
-// Services:
-// WebDAVDownload:
-// ExternalURL: "https://domain.example:9999"
+// Clusters:
+// zzzzz:
+// Services:
+// WebDAVDownload:
+// ExternalURL: "https://domain.example:9999"
//
-// Trust All Content mode
+// # Trust All Content mode
//
// In TrustAllContent mode, Keep-web will accept credentials (API
// tokens) and serve any collection X at
@@ -120,32 +120,31 @@
//
// In such cases you can enable trust-all-content mode.
//
-// Clusters:
-// zzzzz:
-// Collections:
-// TrustAllContent: true
+// Clusters:
+// zzzzz:
+// Collections:
+// TrustAllContent: true
//
// When TrustAllContent is enabled, the only effect of the
// Attachment-Only host setting is to add a "Content-Disposition:
// attachment" header.
//
-// Clusters:
-// zzzzz:
-// Services:
-// WebDAVDownload:
-// ExternalURL: "https://domain.example:9999"
-// Collections:
-// TrustAllContent: true
+// Clusters:
+// zzzzz:
+// Services:
+// WebDAVDownload:
+// ExternalURL: "https://domain.example:9999"
+// Collections:
+// TrustAllContent: true
//
// Depending on your site configuration, you might also want to enable
// the "trust all content" setting in Workbench. Normally, Workbench
// avoids redirecting requests to keep-web if they depend on
// TrustAllContent being enabled.
//
-// Metrics
+// # Metrics
//
// Keep-web exposes request metrics in Prometheus text-based format at
// /metrics. The same information is also available as JSON at
// /metrics.json.
-//
package keepweb
diff --git a/services/keep-web/fpm-info.sh b/services/keep-web/fpm-info.sh
index 6bcbf67fe0..41d020efe5 100644
--- a/services/keep-web/fpm-info.sh
+++ b/services/keep-web/fpm-info.sh
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: AGPL-3.0
case "$TARGET" in
- centos*)
+ centos*|rocky*)
fpm_depends+=(mailcap)
;;
debian* | ubuntu*)
diff --git a/services/keep-web/handler.go b/services/keep-web/handler.go
index 1f1f509860..b9250efec7 100644
--- a/services/keep-web/handler.go
+++ b/services/keep-web/handler.go
@@ -6,6 +6,7 @@ package keepweb
import (
"encoding/json"
+ "errors"
"fmt"
"html"
"html/template"
@@ -13,34 +14,37 @@ import (
"net/http"
"net/url"
"os"
- "path/filepath"
"sort"
"strconv"
"strings"
"sync"
+ "time"
+ "git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/webdavfs"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
"golang.org/x/net/webdav"
)
type handler struct {
- Cache cache
- Cluster *arvados.Cluster
- clientPool *arvadosclient.ClientPool
- setupOnce sync.Once
- webdavLS webdav.LockSystem
+ Cache cache
+ Cluster *arvados.Cluster
+ metrics *metrics
+
+ lockMtx sync.Mutex
+ lock map[string]*sync.RWMutex
+ lockTidied time.Time
}
var urlPDHDecoder = strings.NewReplacer(" ", "+", "-", "+")
-var notFoundMessage = "404 Not found\r\n\r\nThe requested path was not found, or you do not have permission to access it.\r"
-var unauthorizedMessage = "401 Unauthorized\r\n\r\nA valid Arvados token must be provided to access this resource.\r"
+var notFoundMessage = "Not Found"
+var unauthorizedMessage = "401 Unauthorized\n\nA valid Arvados token must be provided to access this resource."
// parseCollectionIDFromURL returns a UUID or PDH if s is a UUID or a
// PDH (even if it is a PDH with "+" replaced by " " or "-");
@@ -55,27 +59,20 @@ func parseCollectionIDFromURL(s string) string {
return ""
}
-func (h *handler) setup() {
- // Errors will be handled at the client pool.
- arv, _ := arvados.NewClientFromConfig(h.Cluster)
- h.clientPool = arvadosclient.MakeClientPoolWith(arv)
-
- keepclient.DefaultBlockCache.MaxBlocks = h.Cluster.Collections.WebDAVCache.MaxBlockEntries
-
- // Even though we don't accept LOCK requests, every webdav
- // handler must have a non-nil LockSystem.
- h.webdavLS = &noLockSystem{}
+func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
+ json.NewEncoder(w).Encode(struct{ Version string }{cmd.Version.String()})
}
-func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
- json.NewEncoder(w).Encode(struct{ Version string }{version})
+type errorWithHTTPStatus interface {
+ HTTPStatus() int
}
// updateOnSuccess wraps httpserver.ResponseWriter. If the handler
// sends an HTTP header indicating success, updateOnSuccess first
-// calls the provided update func. If the update func fails, a 500
-// response is sent, and the status code and body sent by the handler
-// are ignored (all response writes return the update error).
+// calls the provided update func. If the update func fails, an error
+// response is sent (using the error's HTTP status or 500 if none),
+// and the status code and body sent by the handler are ignored (all
+// response writes return the update error).
type updateOnSuccess struct {
httpserver.ResponseWriter
logger logrus.FieldLogger
@@ -100,10 +97,10 @@ func (uos *updateOnSuccess) WriteHeader(code int) {
if code >= 200 && code < 400 {
if uos.err = uos.update(); uos.err != nil {
code := http.StatusInternalServerError
- if err, ok := uos.err.(*arvados.TransactionError); ok {
- code = err.StatusCode
+ if he := errorWithHTTPStatus(nil); errors.As(uos.err, &he) {
+ code = he.HTTPStatus()
}
- uos.logger.WithError(uos.err).Errorf("update() returned error type %T, changing response to HTTP %d", uos.err, code)
+ uos.logger.WithError(uos.err).Errorf("update() returned %T error, changing response to HTTP %d", uos.err, code)
http.Error(uos.ResponseWriter, uos.err.Error(), code)
return
}
@@ -116,7 +113,7 @@ var (
corsAllowHeadersHeader = strings.Join([]string{
"Authorization", "Content-Type", "Range",
// WebDAV request headers:
- "Depth", "Destination", "If", "Lock-Token", "Overwrite", "Timeout",
+ "Depth", "Destination", "If", "Lock-Token", "Overwrite", "Timeout", "Cache-Control",
}, ", ")
writeMethod = map[string]bool{
"COPY": true,
@@ -177,23 +174,18 @@ func (h *handler) Done() <-chan struct{} {
// ServeHTTP implements http.Handler.
func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
- h.setupOnce.Do(h.setup)
-
if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
r.URL.Scheme = xfp
}
- w := httpserver.WrapResponseWriter(wOrig)
+ wbuffer := newWriteBuffer(wOrig, int(h.Cluster.Collections.WebDAVOutputBuffer))
+ defer wbuffer.Close()
+ w := httpserver.WrapResponseWriter(responseWriter{
+ Writer: wbuffer,
+ ResponseWriter: wOrig,
+ })
- if method := r.Header.Get("Access-Control-Request-Method"); method != "" && r.Method == "OPTIONS" {
- if !browserMethod[method] && !webdavMethod[method] {
- w.WriteHeader(http.StatusMethodNotAllowed)
- return
- }
- w.Header().Set("Access-Control-Allow-Headers", corsAllowHeadersHeader)
- w.Header().Set("Access-Control-Allow-Methods", "COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK")
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Header().Set("Access-Control-Max-Age", "86400")
+ if r.Method == "OPTIONS" && ServeCORSPreflight(w, r.Header) {
return
}
@@ -216,7 +208,26 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
return
}
- pathParts := strings.Split(r.URL.Path[1:], "/")
+ webdavPrefix := ""
+ arvPath := r.URL.Path
+ if prefix := r.Header.Get("X-Webdav-Prefix"); prefix != "" {
+ // Enable a proxy (e.g., container log handler in
+ // controller) to satisfy a request for path
+ // "/foo/bar/baz.txt" using content from
+ // "//abc123-4.internal/bar/baz.txt", by adding a
+ // request header "X-Webdav-Prefix: /foo"
+ if !strings.HasPrefix(arvPath, prefix) {
+ http.Error(w, "X-Webdav-Prefix header is not a prefix of the requested path", http.StatusBadRequest)
+ return
+ }
+ arvPath = r.URL.Path[len(prefix):]
+ if arvPath == "" {
+ arvPath = "/"
+ }
+ w.Header().Set("Vary", "X-Webdav-Prefix, "+w.Header().Get("Vary"))
+ webdavPrefix = prefix
+ }
+ pathParts := strings.Split(arvPath[1:], "/")
var stripParts int
var collectionID string
@@ -271,11 +282,6 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
}
}
- if collectionID == "" && !useSiteFS {
- http.Error(w, notFoundMessage, http.StatusNotFound)
- return
- }
-
forceReload := false
if cc := r.Header.Get("Cache-Control"); strings.Contains(cc, "no-cache") || strings.Contains(cc, "must-revalidate") {
forceReload = true
@@ -285,12 +291,18 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
reqTokens = auth.CredentialsFromRequest(r).Tokens
}
- formToken := r.FormValue("api_token")
+ r.ParseForm()
origin := r.Header.Get("Origin")
cors := origin != "" && !strings.HasSuffix(origin, "://"+r.Host)
safeAjax := cors && (r.Method == http.MethodGet || r.Method == http.MethodHead)
- safeAttachment := attachment && r.URL.Query().Get("api_token") == ""
- if formToken == "" {
+ // Important distinction: safeAttachment checks whether api_token exists
+ // as a query parameter. haveFormTokens checks whether api_token exists
+ // as request form data *or* a query parameter. Different checks are
+ // necessary because both the request disposition and the location of
+ // the API token affect whether or not the request needs to be
+ // redirected. The different branch comments below explain further.
+ safeAttachment := attachment && !r.URL.Query().Has("api_token")
+ if formTokens, haveFormTokens := r.Form["api_token"]; !haveFormTokens {
// No token to use or redact.
} else if safeAjax || safeAttachment {
// If this is a cross-origin request, the URL won't
@@ -305,7 +317,9 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
// form?" problem, so provided the token isn't
// embedded in the URL, there's no reason to do
// redirect-with-cookie in this case either.
- reqTokens = append(reqTokens, formToken)
+ for _, tok := range formTokens {
+ reqTokens = append(reqTokens, tok)
+ }
} else if browserMethod[r.Method] {
// If this is a page view, and the client provided a
// token via query string or POST body, we must put
@@ -316,11 +330,6 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
return
}
- if useSiteFS {
- h.serveSiteFS(w, r, reqTokens, credentialsOK, attachment)
- return
- }
-
targetPath := pathParts[stripParts:]
if tokens == nil && len(targetPath) > 0 && strings.HasPrefix(targetPath[0], "t=") {
// http://ID.example/t=TOKEN/PATH...
@@ -335,20 +344,34 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
stripParts++
}
- if tokens == nil {
- tokens = reqTokens
- if h.Cluster.Users.AnonymousUserToken != "" {
- tokens = append(tokens, h.Cluster.Users.AnonymousUserToken)
+ fsprefix := ""
+ if useSiteFS {
+ if writeMethod[r.Method] {
+ http.Error(w, webdavfs.ErrReadOnly.Error(), http.StatusMethodNotAllowed)
+ return
+ }
+ if len(reqTokens) == 0 {
+ w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
+ http.Error(w, unauthorizedMessage, http.StatusUnauthorized)
+ return
}
+ tokens = reqTokens
+ } else if collectionID == "" {
+ http.Error(w, notFoundMessage, http.StatusNotFound)
+ return
+ } else {
+ fsprefix = "by_id/" + collectionID + "/"
+ }
+
+ if src := r.Header.Get("X-Webdav-Source"); strings.HasPrefix(src, "/") && !strings.Contains(src, "//") && !strings.Contains(src, "/../") {
+ fsprefix += src[1:]
}
if tokens == nil {
- if !credentialsOK {
- http.Error(w, fmt.Sprintf("Authorization tokens are not accepted here: %v, and no anonymous user token is configured.", reasonNotAcceptingCredentials), http.StatusUnauthorized)
- } else {
- http.Error(w, fmt.Sprintf("No authorization token in request, and no anonymous user token is configured."), http.StatusUnauthorized)
+ tokens = reqTokens
+ if h.Cluster.Users.AnonymousUserToken != "" {
+ tokens = append(tokens, h.Cluster.Users.AnonymousUserToken)
}
- return
}
if len(targetPath) > 0 && targetPath[0] == "_" {
@@ -362,53 +385,104 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
stripParts++
}
- arv := h.clientPool.Get()
- if arv == nil {
- http.Error(w, "client pool error: "+h.clientPool.Err().Error(), http.StatusInternalServerError)
- return
+ dirOpenMode := os.O_RDONLY
+ if writeMethod[r.Method] {
+ dirOpenMode = os.O_RDWR
}
- defer h.clientPool.Put(arv)
- var collection *arvados.Collection
+ var tokenValid bool
+ var tokenScopeProblem bool
+ var token string
var tokenUser *arvados.User
- tokenResult := make(map[string]int)
- for _, arv.ApiToken = range tokens {
- var err error
- collection, err = h.Cache.Get(arv, collectionID, forceReload)
- if err == nil {
- // Success
- break
+ var sessionFS arvados.CustomFileSystem
+ var session *cachedSession
+ var collectionDir arvados.File
+ for _, token = range tokens {
+ var statusErr errorWithHTTPStatus
+ fs, sess, user, err := h.Cache.GetSession(token)
+ if errors.As(err, &statusErr) && statusErr.HTTPStatus() == http.StatusUnauthorized {
+ // bad token
+ continue
+ } else if err != nil {
+ http.Error(w, "cache error: "+err.Error(), http.StatusInternalServerError)
+ return
}
- if srvErr, ok := err.(arvadosclient.APIServerError); ok {
- switch srvErr.HttpStatusCode {
- case 404, 401:
- // Token broken or insufficient to
- // retrieve collection
- tokenResult[arv.ApiToken] = srvErr.HttpStatusCode
- continue
+ if token != h.Cluster.Users.AnonymousUserToken {
+ tokenValid = true
+ }
+ f, err := fs.OpenFile(fsprefix, dirOpenMode, 0)
+ if errors.As(err, &statusErr) &&
+ statusErr.HTTPStatus() == http.StatusForbidden &&
+ token != h.Cluster.Users.AnonymousUserToken {
+ // collection id is outside scope of supplied
+ // token
+ tokenScopeProblem = true
+ sess.Release()
+ continue
+ } else if os.IsNotExist(err) {
+ // collection does not exist or is not
+ // readable using this token
+ sess.Release()
+ continue
+ } else if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ sess.Release()
+ return
+ }
+ defer f.Close()
+
+ collectionDir, sessionFS, session, tokenUser = f, fs, sess, user
+ break
+ }
+
+ // releaseSession() is equivalent to session.Release() except
+ // that it's a no-op if (1) session is nil, or (2) it has
+ // already been called.
+ //
+ // This way, we can do a defer call here to ensure it gets
+ // called in all code paths, and also call it inline (see
+ // below) in the cases where we want to release the lock
+ // before returning.
+ releaseSession := func() {}
+ if session != nil {
+ var releaseSessionOnce sync.Once
+ releaseSession = func() { releaseSessionOnce.Do(func() { session.Release() }) }
+ }
+ defer releaseSession()
+
+ if forceReload && collectionDir != nil {
+ err := collectionDir.Sync()
+ if err != nil {
+ if he := errorWithHTTPStatus(nil); errors.As(err, &he) {
+ http.Error(w, err.Error(), he.HTTPStatus())
+ } else {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
}
+ return
}
- // Something more serious is wrong
- http.Error(w, "cache error: "+err.Error(), http.StatusInternalServerError)
- return
}
- if collection == nil {
- if pathToken || !credentialsOK {
- // Either the URL is a "secret sharing link"
- // that didn't work out (and asking the client
- // for additional credentials would just be
- // confusing), or we don't even accept
- // credentials at this path.
+ if session == nil {
+ if pathToken {
+ // The URL is a "secret sharing link" that
+ // didn't work out. Asking the client for
+ // additional credentials would just be
+ // confusing.
http.Error(w, notFoundMessage, http.StatusNotFound)
return
}
- for _, t := range reqTokens {
- if tokenResult[t] == 404 {
- // The client provided valid token(s), but the
- // collection was not found.
- http.Error(w, notFoundMessage, http.StatusNotFound)
- return
- }
+ if tokenValid {
+ // The client provided valid token(s), but the
+ // collection was not found.
+ http.Error(w, notFoundMessage, http.StatusNotFound)
+ return
+ }
+		if tokenScopeProblem {
+			// The client provided a valid token but
+			// fetching a collection returned 403, which
+			// means the token scope doesn't permit
+			// fetching that collection.
+ http.Error(w, notFoundMessage, http.StatusForbidden)
+ return
}
// The client's token was invalid (e.g., expired), or
// the client didn't even provide one. Redirect to
@@ -445,212 +519,140 @@ func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
target.RawQuery = redirkey + "=" + callback
w.Header().Add("Location", target.String())
w.WriteHeader(http.StatusSeeOther)
- } else {
- w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
- http.Error(w, unauthorizedMessage, http.StatusUnauthorized)
+ return
+ }
+ if !credentialsOK {
+ http.Error(w, fmt.Sprintf("Authorization tokens are not accepted here: %v, and no anonymous user token is configured.", reasonNotAcceptingCredentials), http.StatusUnauthorized)
+ return
}
+	// If none of the above cases apply, suggest that the
+ // user-agent (which is either a non-browser agent
+ // like wget, or a browser that can't redirect through
+ // a login flow) prompt the user for credentials.
+ w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
+ http.Error(w, unauthorizedMessage, http.StatusUnauthorized)
return
}
- kc, err := keepclient.MakeKeepClient(arv)
- if err != nil {
- http.Error(w, "error setting up keep client: "+err.Error(), http.StatusInternalServerError)
- return
+ if r.Method == http.MethodGet || r.Method == http.MethodHead {
+ targetfnm := fsprefix + strings.Join(pathParts[stripParts:], "/")
+ if fi, err := sessionFS.Stat(targetfnm); err == nil && fi.IsDir() {
+ releaseSession() // because we won't be writing anything
+ if !strings.HasSuffix(r.URL.Path, "/") {
+ h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
+ } else {
+ h.serveDirectory(w, r, fi.Name(), sessionFS, targetfnm, !useSiteFS)
+ }
+ return
+ }
}
- kc.RequestID = r.Header.Get("X-Request-Id")
var basename string
if len(targetPath) > 0 {
basename = targetPath[len(targetPath)-1]
}
- applyContentDispositionHdr(w, r, basename, attachment)
-
- client := (&arvados.Client{
- APIHost: arv.ApiServer,
- AuthToken: arv.ApiToken,
- Insecure: arv.ApiInsecure,
- }).WithRequestID(r.Header.Get("X-Request-Id"))
-
- fs, err := collection.FileSystem(client, kc)
- if err != nil {
- http.Error(w, "error creating collection filesystem: "+err.Error(), http.StatusInternalServerError)
+ if arvadosclient.PDHMatch(collectionID) && writeMethod[r.Method] {
+ http.Error(w, webdavfs.ErrReadOnly.Error(), http.StatusMethodNotAllowed)
return
}
-
- writefs, writeOK := fs.(arvados.CollectionFileSystem)
- targetIsPDH := arvadosclient.PDHMatch(collectionID)
- if (targetIsPDH || !writeOK) && writeMethod[r.Method] {
- http.Error(w, errReadOnly.Error(), http.StatusMethodNotAllowed)
+ if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
+ http.Error(w, "Not permitted", http.StatusForbidden)
return
}
+ h.logUploadOrDownload(r, session.arvadosclient, sessionFS, fsprefix+strings.Join(targetPath, "/"), nil, tokenUser)
- // Check configured permission
- _, sess, err := h.Cache.GetSession(arv.ApiToken)
- tokenUser, err = h.Cache.GetTokenUser(arv.ApiToken)
+ writing := writeMethod[r.Method]
+ locker := h.collectionLock(collectionID, writing)
+ defer locker.Unlock()
- if webdavMethod[r.Method] {
- if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
- http.Error(w, "Not permitted", http.StatusForbidden)
+ if writing {
+ // Save the collection only if/when all
+ // webdav->filesystem operations succeed --
+ // and send a 500 error if the modified
+ // collection can't be saved.
+ //
+ // Perform the write in a separate sitefs, so
+ // concurrent read operations on the same
+ // collection see the previous saved
+ // state. After the write succeeds and the
+ // collection record is updated, we reset the
+ // session so the updates are visible in
+ // subsequent read requests.
+ client := session.client.WithRequestID(r.Header.Get("X-Request-Id"))
+ sessionFS = client.SiteFileSystem(session.keepclient)
+ writingDir, err := sessionFS.OpenFile(fsprefix, os.O_RDONLY, 0)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- h.logUploadOrDownload(r, sess.arvadosclient, nil, strings.Join(targetPath, "/"), collection, tokenUser)
-
- if writeMethod[r.Method] {
- // Save the collection only if/when all
- // webdav->filesystem operations succeed --
- // and send a 500 error if the modified
- // collection can't be saved.
- w = &updateOnSuccess{
- ResponseWriter: w,
- logger: ctxlog.FromContext(r.Context()),
- update: func() error {
- return h.Cache.Update(client, *collection, writefs)
- }}
- }
- h := webdav.Handler{
- Prefix: "/" + strings.Join(pathParts[:stripParts], "/"),
- FileSystem: &webdavFS{
- collfs: fs,
- writing: writeMethod[r.Method],
- alwaysReadEOF: r.Method == "PROPFIND",
- },
- LockSystem: h.webdavLS,
- Logger: func(_ *http.Request, err error) {
+ defer writingDir.Close()
+ w = &updateOnSuccess{
+ ResponseWriter: w,
+ logger: ctxlog.FromContext(r.Context()),
+ update: func() error {
+ err := writingDir.Sync()
+ var te arvados.TransactionError
+ if errors.As(err, &te) {
+ err = te
+ }
if err != nil {
- ctxlog.FromContext(r.Context()).WithError(err).Error("error reported by webdav handler")
+ return err
}
- },
- }
- h.ServeHTTP(w, r)
- return
- }
-
- openPath := "/" + strings.Join(targetPath, "/")
- f, err := fs.Open(openPath)
- if os.IsNotExist(err) {
- // Requested non-existent path
- http.Error(w, notFoundMessage, http.StatusNotFound)
- return
- } else if err != nil {
- // Some other (unexpected) error
- http.Error(w, "open: "+err.Error(), http.StatusInternalServerError)
- return
- }
- defer f.Close()
- if stat, err := f.Stat(); err != nil {
- // Can't get Size/IsDir (shouldn't happen with a collectionFS!)
- http.Error(w, "stat: "+err.Error(), http.StatusInternalServerError)
- } else if stat.IsDir() && !strings.HasSuffix(r.URL.Path, "/") {
- // If client requests ".../dirname", redirect to
- // ".../dirname/". This way, relative links in the
- // listing for "dirname" can always be "fnm", never
- // "dirname/fnm".
- h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
- } else if stat.IsDir() {
- h.serveDirectory(w, r, collection.Name, fs, openPath, true)
+ // Sync the changes to the persistent
+ // sessionfs for this token.
+ snap, err := writingDir.Snapshot()
+ if err != nil {
+ return err
+ }
+ collectionDir.Splice(snap)
+ return nil
+ }}
} else {
- if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
- http.Error(w, "Not permitted", http.StatusForbidden)
- return
- }
- h.logUploadOrDownload(r, sess.arvadosclient, nil, strings.Join(targetPath, "/"), collection, tokenUser)
-
- http.ServeContent(w, r, basename, stat.ModTime(), f)
- if wrote := int64(w.WroteBodyBytes()); wrote != stat.Size() && w.WroteStatus() == http.StatusOK {
- // If we wrote fewer bytes than expected, it's
- // too late to change the real response code
- // or send an error message to the client, but
- // at least we can try to put some useful
- // debugging info in the logs.
- n, err := f.Read(make([]byte, 1024))
- ctxlog.FromContext(r.Context()).Errorf("stat.Size()==%d but only wrote %d bytes; read(1024) returns %d, %v", stat.Size(), wrote, n, err)
- }
- }
-}
-
-func (h *handler) getClients(reqID, token string) (arv *arvadosclient.ArvadosClient, kc *keepclient.KeepClient, client *arvados.Client, release func(), err error) {
- arv = h.clientPool.Get()
- if arv == nil {
- err = h.clientPool.Err()
- return
- }
- release = func() { h.clientPool.Put(arv) }
- arv.ApiToken = token
- kc, err = keepclient.MakeKeepClient(arv)
- if err != nil {
- release()
- return
- }
- kc.RequestID = reqID
- client = (&arvados.Client{
- APIHost: arv.ApiServer,
- AuthToken: arv.ApiToken,
- Insecure: arv.ApiInsecure,
- }).WithRequestID(reqID)
- return
-}
-
-func (h *handler) serveSiteFS(w http.ResponseWriter, r *http.Request, tokens []string, credentialsOK, attachment bool) {
- if len(tokens) == 0 {
- w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
- http.Error(w, unauthorizedMessage, http.StatusUnauthorized)
- return
- }
- if writeMethod[r.Method] {
- http.Error(w, errReadOnly.Error(), http.StatusMethodNotAllowed)
- return
- }
-
- fs, sess, err := h.Cache.GetSession(tokens[0])
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- fs.ForwardSlashNameSubstitution(h.Cluster.Collections.ForwardSlashNameSubstitution)
- f, err := fs.Open(r.URL.Path)
- if os.IsNotExist(err) {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- } else if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- defer f.Close()
- if fi, err := f.Stat(); err == nil && fi.IsDir() && r.Method == "GET" {
- if !strings.HasSuffix(r.URL.Path, "/") {
- h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
- } else {
- h.serveDirectory(w, r, fi.Name(), fs, r.URL.Path, false)
- }
- return
- }
-
- tokenUser, err := h.Cache.GetTokenUser(tokens[0])
- if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
- http.Error(w, "Not permitted", http.StatusForbidden)
- return
- }
- h.logUploadOrDownload(r, sess.arvadosclient, fs, r.URL.Path, nil, tokenUser)
-
- if r.Method == "GET" {
- _, basename := filepath.Split(r.URL.Path)
+ // When writing, we need to block session renewal
+ // until we're finished, in order to guarantee the
+ // effect of the write is visible in future responses.
+ // But if we're not writing, we can release the lock
+ // early. This enables us to keep renewing sessions
+ // and processing more requests even if a slow client
+ // takes a long time to download a large file.
+ releaseSession()
+ }
+ if r.Method == http.MethodGet {
applyContentDispositionHdr(w, r, basename, attachment)
}
- wh := webdav.Handler{
- Prefix: "/",
- FileSystem: &webdavFS{
- collfs: fs,
- writing: writeMethod[r.Method],
- alwaysReadEOF: r.Method == "PROPFIND",
+ if webdavPrefix == "" {
+ webdavPrefix = "/" + strings.Join(pathParts[:stripParts], "/")
+ }
+ wh := &webdav.Handler{
+ Prefix: webdavPrefix,
+ FileSystem: &webdavfs.FS{
+ FileSystem: sessionFS,
+ Prefix: fsprefix,
+ Writing: writeMethod[r.Method],
+ AlwaysReadEOF: r.Method == "PROPFIND",
},
- LockSystem: h.webdavLS,
- Logger: func(_ *http.Request, err error) {
- if err != nil {
+ LockSystem: webdavfs.NoLockSystem,
+ Logger: func(r *http.Request, err error) {
+ if err != nil && !os.IsNotExist(err) {
ctxlog.FromContext(r.Context()).WithError(err).Error("error reported by webdav handler")
}
},
}
- wh.ServeHTTP(w, r)
+ h.metrics.track(wh, w, r)
+ if r.Method == http.MethodGet && w.WroteStatus() == http.StatusOK {
+ wrote := int64(w.WroteBodyBytes())
+ fnm := strings.Join(pathParts[stripParts:], "/")
+ fi, err := wh.FileSystem.Stat(r.Context(), fnm)
+ if err == nil && fi.Size() != wrote {
+ var n int
+ f, err := wh.FileSystem.OpenFile(r.Context(), fnm, os.O_RDONLY, 0)
+ if err == nil {
+ n, err = f.Read(make([]byte, 1024))
+ f.Close()
+ }
+ ctxlog.FromContext(r.Context()).Errorf("stat.Size()==%d but only wrote %d bytes; read(1024) returns %d, %v", fi.Size(), wrote, n, err)
+ }
+ }
}
var dirListingTemplate = `
@@ -805,7 +807,7 @@ func applyContentDispositionHdr(w http.ResponseWriter, r *http.Request, filename
}
func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, location string, credentialsOK bool) {
- if formToken := r.FormValue("api_token"); formToken != "" {
+ if formTokens, haveFormTokens := r.Form["api_token"]; haveFormTokens {
if !credentialsOK {
// It is not safe to copy the provided token
// into a cookie unless the current vhost
@@ -826,13 +828,19 @@ func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, loc
// bar, and in the case of a POST request to avoid
// raising warnings when the user refreshes the
// resulting page.
- http.SetCookie(w, &http.Cookie{
- Name: "arvados_api_token",
- Value: auth.EncodeTokenCookie([]byte(formToken)),
- Path: "/",
- HttpOnly: true,
- SameSite: http.SameSiteLaxMode,
- })
+ for _, tok := range formTokens {
+ if tok == "" {
+ continue
+ }
+ http.SetCookie(w, &http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(tok)),
+ Path: "/",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ })
+ break
+ }
}
// Propagate query parameters (except api_token) from
@@ -909,17 +917,18 @@ func (h *handler) logUploadOrDownload(
collection, filepath = h.determineCollection(fs, filepath)
}
if collection != nil {
- log = log.WithField("collection_uuid", collection.UUID).
- WithField("collection_file_path", filepath)
- props["collection_uuid"] = collection.UUID
+ log = log.WithField("collection_file_path", filepath)
props["collection_file_path"] = filepath
- // h.determineCollection populates the collection_uuid prop with the PDH, if
- // this collection is being accessed via PDH. In that case, blank the
- // collection_uuid field so that consumers of the log entries can rely on it
- // being a UUID, or blank. The PDH remains available via the
- // portable_data_hash property.
- if props["collection_uuid"] == collection.PortableDataHash {
- props["collection_uuid"] = ""
+ // h.determineCollection populates the collection_uuid
+ // prop with the PDH, if this collection is being
+ // accessed via PDH. For logging, we use a different
+ // field depending on whether it's a UUID or PDH.
+ if len(collection.UUID) > 32 {
+ log = log.WithField("portable_data_hash", collection.UUID)
+ props["portable_data_hash"] = collection.UUID
+ } else {
+ log = log.WithField("collection_uuid", collection.UUID)
+ props["collection_uuid"] = collection.UUID
}
}
if r.Method == "PUT" || r.Method == "POST" {
@@ -958,29 +967,78 @@ func (h *handler) logUploadOrDownload(
}
func (h *handler) determineCollection(fs arvados.CustomFileSystem, path string) (*arvados.Collection, string) {
- segments := strings.Split(path, "/")
- var i int
- for i = 0; i < len(segments); i++ {
- dir := append([]string{}, segments[0:i]...)
- dir = append(dir, ".arvados#collection")
- f, err := fs.OpenFile(strings.Join(dir, "/"), os.O_RDONLY, 0)
- if f != nil {
- defer f.Close()
- }
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, ""
- }
+ target := strings.TrimSuffix(path, "/")
+ for cut := len(target); cut >= 0; cut = strings.LastIndexByte(target, '/') {
+ target = target[:cut]
+ fi, err := fs.Stat(target)
+ if os.IsNotExist(err) {
+ // creating a new file/dir, or download
+ // destined to fail
continue
+ } else if err != nil {
+ return nil, ""
}
- // err is nil so we found it.
- decoder := json.NewDecoder(f)
- var collection arvados.Collection
- err = decoder.Decode(&collection)
- if err != nil {
+ switch src := fi.Sys().(type) {
+ case *arvados.Collection:
+ return src, strings.TrimPrefix(path[len(target):], "/")
+ case *arvados.Group:
return nil, ""
+ default:
+ if _, ok := src.(error); ok {
+ return nil, ""
+ }
}
- return &collection, strings.Join(segments[i:], "/")
}
return nil, ""
}
+
+var lockTidyInterval = time.Minute * 10
+
+// Lock the specified collection for reading or writing. Caller must
+// call Unlock() on the returned Locker when the operation is
+// finished.
+func (h *handler) collectionLock(collectionID string, writing bool) sync.Locker {
+ h.lockMtx.Lock()
+ defer h.lockMtx.Unlock()
+ if time.Since(h.lockTidied) > lockTidyInterval {
+ // Periodically delete all locks that aren't in use.
+ h.lockTidied = time.Now()
+ for id, locker := range h.lock {
+ if locker.TryLock() {
+ locker.Unlock()
+ delete(h.lock, id)
+ }
+ }
+ }
+ locker := h.lock[collectionID]
+ if locker == nil {
+ locker = new(sync.RWMutex)
+ if h.lock == nil {
+ h.lock = map[string]*sync.RWMutex{}
+ }
+ h.lock[collectionID] = locker
+ }
+ if writing {
+ locker.Lock()
+ return locker
+ } else {
+ locker.RLock()
+ return locker.RLocker()
+ }
+}
+
+func ServeCORSPreflight(w http.ResponseWriter, header http.Header) bool {
+ method := header.Get("Access-Control-Request-Method")
+ if method == "" {
+ return false
+ }
+ if !browserMethod[method] && !webdavMethod[method] {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return true
+ }
+ w.Header().Set("Access-Control-Allow-Headers", corsAllowHeadersHeader)
+ w.Header().Set("Access-Control-Allow-Methods", "COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK")
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Max-Age", "86400")
+ return true
+}
diff --git a/services/keep-web/handler_test.go b/services/keep-web/handler_test.go
index 768013185a..07c7016d3a 100644
--- a/services/keep-web/handler_test.go
+++ b/services/keep-web/handler_test.go
@@ -18,6 +18,7 @@ import (
"path/filepath"
"regexp"
"strings"
+ "sync"
"time"
"git.arvados.org/arvados.git/lib/config"
@@ -59,6 +60,7 @@ func (s *UnitSuite) SetUpTest(c *check.C) {
logger: logger,
registry: prometheus.NewRegistry(),
},
+ metrics: newMetrics(prometheus.NewRegistry()),
}
}
@@ -83,7 +85,7 @@ func (s *UnitSuite) TestCORSPreflight(c *check.C) {
c.Check(resp.Body.String(), check.Equals, "")
c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK")
- c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout")
+ c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout, Cache-Control")
// Check preflight for a disallowed request
resp = httptest.NewRecorder()
@@ -93,7 +95,125 @@ func (s *UnitSuite) TestCORSPreflight(c *check.C) {
c.Check(resp.Code, check.Equals, http.StatusMethodNotAllowed)
}
+func (s *UnitSuite) TestWebdavPrefixAndSource(c *check.C) {
+ for _, trial := range []struct {
+ method string
+ path string
+ prefix string
+ source string
+ notFound bool
+ seeOther bool
+ }{
+ {
+ method: "PROPFIND",
+ path: "/",
+ },
+ {
+ method: "PROPFIND",
+ path: "/dir1",
+ },
+ {
+ method: "PROPFIND",
+ path: "/dir1/",
+ },
+ {
+ method: "PROPFIND",
+ path: "/dir1/foo",
+ prefix: "/dir1",
+ source: "/dir1",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix/dir1/foo",
+ prefix: "/prefix/",
+ source: "",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix/dir1/foo",
+ prefix: "/prefix",
+ source: "",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix/dir1/foo",
+ prefix: "/prefix/",
+ source: "/",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix/foo",
+ prefix: "/prefix/",
+ source: "/dir1/",
+ },
+ {
+ method: "GET",
+ path: "/prefix/foo",
+ prefix: "/prefix/",
+ source: "/dir1/",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix/",
+ prefix: "/prefix",
+ source: "/dir1",
+ },
+ {
+ method: "PROPFIND",
+ path: "/prefix",
+ prefix: "/prefix",
+ source: "/dir1/",
+ },
+ {
+ method: "GET",
+ path: "/prefix",
+ prefix: "/prefix",
+ source: "/dir1",
+ seeOther: true,
+ },
+ {
+ method: "PROPFIND",
+ path: "/dir1/foo",
+ prefix: "",
+ source: "/dir1",
+ notFound: true,
+ },
+ } {
+ c.Logf("trial %+v", trial)
+ u := mustParseURL("http://" + arvadostest.FooBarDirCollection + ".keep-web.example" + trial.path)
+ req := &http.Request{
+ Method: trial.method,
+ Host: u.Host,
+ URL: u,
+ RequestURI: u.RequestURI(),
+ Header: http.Header{
+ "Authorization": {"Bearer " + arvadostest.ActiveTokenV2},
+ "X-Webdav-Prefix": {trial.prefix},
+ "X-Webdav-Source": {trial.source},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ if trial.notFound {
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ } else if trial.method == "PROPFIND" {
+ c.Check(resp.Code, check.Equals, http.StatusMultiStatus)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*>\n?$`)
+ } else if trial.seeOther {
+ c.Check(resp.Code, check.Equals, http.StatusSeeOther)
+ } else {
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ }
+ }
+}
+
func (s *UnitSuite) TestEmptyResponse(c *check.C) {
+ // Ensure we start with an empty cache
+ defer os.Setenv("HOME", os.Getenv("HOME"))
+ os.Setenv("HOME", c.MkDir())
+
for _, trial := range []struct {
dataExists bool
sendIMSHeader bool
@@ -213,9 +333,10 @@ func (s *IntegrationSuite) TestVhostViaAuthzHeaderOAuth2(c *check.C) {
s.doVhostRequests(c, authzViaAuthzHeaderOAuth2)
}
func authzViaAuthzHeaderOAuth2(r *http.Request, tok string) int {
- r.Header.Add("Authorization", "Bearer "+tok)
+ r.Header.Add("Authorization", "OAuth2 "+tok)
return http.StatusUnauthorized
}
+
func (s *IntegrationSuite) TestVhostViaAuthzHeaderBearer(c *check.C) {
s.doVhostRequests(c, authzViaAuthzHeaderBearer)
}
@@ -235,6 +356,27 @@ func authzViaCookieValue(r *http.Request, tok string) int {
return http.StatusUnauthorized
}
+func (s *IntegrationSuite) TestVhostViaHTTPBasicAuth(c *check.C) {
+ s.doVhostRequests(c, authzViaHTTPBasicAuth)
+}
+func authzViaHTTPBasicAuth(r *http.Request, tok string) int {
+ r.AddCookie(&http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(tok)),
+ })
+ return http.StatusUnauthorized
+}
+
+func (s *IntegrationSuite) TestVhostViaHTTPBasicAuthWithExtraSpaceChars(c *check.C) {
+ s.doVhostRequests(c, func(r *http.Request, tok string) int {
+ r.AddCookie(&http.Cookie{
+ Name: "arvados_api_token",
+ Value: auth.EncodeTokenCookie([]byte(" " + tok + "\n")),
+ })
+ return http.StatusUnauthorized
+ })
+}
+
func (s *IntegrationSuite) TestVhostViaPath(c *check.C) {
s.doVhostRequests(c, authzViaPath)
}
@@ -366,6 +508,24 @@ func (s *IntegrationSuite) TestVhostPortMatch(c *check.C) {
}
}
+func (s *IntegrationSuite) do(method string, urlstring string, token string, hdr http.Header) (*http.Request, *httptest.ResponseRecorder) {
+ u := mustParseURL(urlstring)
+ if hdr == nil && token != "" {
+ hdr = http.Header{"Authorization": {"Bearer " + token}}
+ } else if hdr == nil {
+ hdr = http.Header{}
+ } else if token != "" {
+ panic("must not pass both token and hdr")
+ }
+ return s.doReq(&http.Request{
+ Method: method,
+ Host: u.Host,
+ URL: u,
+ RequestURI: u.RequestURI(),
+ Header: hdr,
+ })
+}
+
func (s *IntegrationSuite) doReq(req *http.Request) (*http.Request, *httptest.ResponseRecorder) {
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
@@ -409,6 +569,26 @@ func (s *IntegrationSuite) TestSingleOriginSecretLink(c *check.C) {
)
}
+func (s *IntegrationSuite) TestCollectionSharingToken(c *check.C) {
+ s.testVhostRedirectTokenToCookie(c, "GET",
+ "example.com/c="+arvadostest.FooFileCollectionUUID+"/t="+arvadostest.FooFileCollectionSharingToken+"/foo",
+ "",
+ nil,
+ "",
+ http.StatusOK,
+ "foo",
+ )
+ // Same valid sharing token, but requesting a different collection
+ s.testVhostRedirectTokenToCookie(c, "GET",
+ "example.com/c="+arvadostest.FooCollection+"/t="+arvadostest.FooFileCollectionSharingToken+"/foo",
+ "",
+ nil,
+ "",
+ http.StatusNotFound,
+ regexp.QuoteMeta(notFoundMessage+"\n"),
+ )
+}
+
// Bad token in URL is 404 Not Found because it doesn't make sense to
// retry the same URL with different authorization.
func (s *IntegrationSuite) TestSingleOriginSecretLinkBadToken(c *check.C) {
@@ -418,7 +598,7 @@ func (s *IntegrationSuite) TestSingleOriginSecretLinkBadToken(c *check.C) {
nil,
"",
http.StatusNotFound,
- notFoundMessage+"\n",
+ regexp.QuoteMeta(notFoundMessage+"\n"),
)
}
@@ -481,7 +661,7 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenToBogusCookie(c *check.C)
http.Header{"Sec-Fetch-Mode": {"cors"}},
"",
http.StatusUnauthorized,
- unauthorizedMessage+"\n",
+ regexp.QuoteMeta(unauthorizedMessage+"\n"),
)
s.testVhostRedirectTokenToCookie(c, "GET",
s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host+"/c="+arvadostest.FooCollection+"/foo",
@@ -489,10 +669,65 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenToBogusCookie(c *check.C)
nil,
"",
http.StatusUnauthorized,
- unauthorizedMessage+"\n",
+ regexp.QuoteMeta(unauthorizedMessage+"\n"),
)
}
+func (s *IntegrationSuite) TestVhostRedirectWithNoCache(c *check.C) {
+ resp := s.testVhostRedirectTokenToCookie(c, "GET",
+ arvadostest.FooCollection+".example.com/foo",
+ "?api_token=thisisabogustoken",
+ http.Header{
+ "Sec-Fetch-Mode": {"navigate"},
+ "Cache-Control": {"no-cache"},
+ },
+ "",
+ http.StatusSeeOther,
+ "",
+ )
+ u, err := url.Parse(resp.Header().Get("Location"))
+ c.Assert(err, check.IsNil)
+ c.Logf("redirected to %s", u)
+ c.Check(u.Host, check.Equals, s.handler.Cluster.Services.Workbench2.ExternalURL.Host)
+ c.Check(u.Query().Get("redirectToPreview"), check.Equals, "/c="+arvadostest.FooCollection+"/foo")
+ c.Check(u.Query().Get("redirectToDownload"), check.Equals, "")
+}
+
+func (s *IntegrationSuite) TestNoTokenWorkbench2LoginFlow(c *check.C) {
+ for _, trial := range []struct {
+ anonToken bool
+ cacheControl string
+ }{
+ {},
+ {cacheControl: "no-cache"},
+ {anonToken: true},
+ {anonToken: true, cacheControl: "no-cache"},
+ } {
+ c.Logf("trial: %+v", trial)
+
+ if trial.anonToken {
+ s.handler.Cluster.Users.AnonymousUserToken = arvadostest.AnonymousToken
+ } else {
+ s.handler.Cluster.Users.AnonymousUserToken = ""
+ }
+ req, err := http.NewRequest("GET", "http://"+arvadostest.FooCollection+".example.com/foo", nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Sec-Fetch-Mode", "navigate")
+ if trial.cacheControl != "" {
+ req.Header.Set("Cache-Control", trial.cacheControl)
+ }
+ resp := httptest.NewRecorder()
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusSeeOther)
+ u, err := url.Parse(resp.Header().Get("Location"))
+ c.Assert(err, check.IsNil)
+ c.Logf("redirected to %q", u)
+ c.Check(u.Host, check.Equals, s.handler.Cluster.Services.Workbench2.ExternalURL.Host)
+ c.Check(u.Query().Get("redirectToPreview"), check.Equals, "/c="+arvadostest.FooCollection+"/foo")
+ c.Check(u.Query().Get("redirectToDownload"), check.Equals, "")
+ }
+}
+
func (s *IntegrationSuite) TestVhostRedirectQueryTokenSingleOriginError(c *check.C) {
s.testVhostRedirectTokenToCookie(c, "GET",
"example.com/c="+arvadostest.FooCollection+"/foo",
@@ -500,7 +735,7 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenSingleOriginError(c *check
nil,
"",
http.StatusBadRequest,
- "cannot serve inline content at this URL (possible configuration error; see https://doc.arvados.org/install/install-keep-web.html#dns)\n",
+ regexp.QuoteMeta("cannot serve inline content at this URL (possible configuration error; see https://doc.arvados.org/install/install-keep-web.html#dns)\n"),
)
}
@@ -575,7 +810,7 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenAttachmentOnlyHost(c *chec
nil,
"",
http.StatusBadRequest,
- "cannot serve inline content at this URL (possible configuration error; see https://doc.arvados.org/install/install-keep-web.html#dns)\n",
+ regexp.QuoteMeta("cannot serve inline content at this URL (possible configuration error; see https://doc.arvados.org/install/install-keep-web.html#dns)\n"),
)
resp := s.testVhostRedirectTokenToCookie(c, "GET",
@@ -589,6 +824,34 @@ func (s *IntegrationSuite) TestVhostRedirectQueryTokenAttachmentOnlyHost(c *chec
c.Check(resp.Header().Get("Content-Disposition"), check.Equals, "attachment")
}
+func (s *IntegrationSuite) TestVhostRedirectMultipleTokens(c *check.C) {
+ baseUrl := arvadostest.FooCollection + ".example.com/foo"
+ query := url.Values{}
+
+ // The intent of these tests is to check that requests are redirected
+ // correctly in the presence of multiple API tokens. The exact response
+ // codes and content are not closely considered: they're just how
+ // keep-web responded when we made the smallest possible fix. Changing
+ // those responses may be okay, but you should still test all these
+ // different cases and the associated redirect logic.
+ query["api_token"] = []string{arvadostest.ActiveToken, arvadostest.AnonymousToken}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusOK, "foo")
+ query["api_token"] = []string{arvadostest.ActiveToken, arvadostest.AnonymousToken, ""}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusOK, "foo")
+ query["api_token"] = []string{arvadostest.ActiveToken, "", arvadostest.AnonymousToken}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusOK, "foo")
+ query["api_token"] = []string{"", arvadostest.ActiveToken}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusOK, "foo")
+
+ expectContent := regexp.QuoteMeta(unauthorizedMessage + "\n")
+ query["api_token"] = []string{arvadostest.AnonymousToken, "invalidtoo"}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusUnauthorized, expectContent)
+ query["api_token"] = []string{arvadostest.AnonymousToken, ""}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusUnauthorized, expectContent)
+ query["api_token"] = []string{"", arvadostest.AnonymousToken}
+ s.testVhostRedirectTokenToCookie(c, "GET", baseUrl, "?"+query.Encode(), nil, "", http.StatusUnauthorized, expectContent)
+}
+
func (s *IntegrationSuite) TestVhostRedirectPOSTFormTokenToCookie(c *check.C) {
s.testVhostRedirectTokenToCookie(c, "POST",
arvadostest.FooCollection+".example.com/foo",
@@ -607,7 +870,7 @@ func (s *IntegrationSuite) TestVhostRedirectPOSTFormTokenToCookie404(c *check.C)
http.Header{"Content-Type": {"application/x-www-form-urlencoded"}},
url.Values{"api_token": {arvadostest.SpectatorToken}}.Encode(),
http.StatusNotFound,
- notFoundMessage+"\n",
+ regexp.QuoteMeta(notFoundMessage+"\n"),
)
}
@@ -630,8 +893,8 @@ func (s *IntegrationSuite) TestAnonymousTokenError(c *check.C) {
"",
nil,
"",
- http.StatusNotFound,
- notFoundMessage+"\n",
+ http.StatusUnauthorized,
+ "Authorization tokens are not accepted here: .*\n",
)
}
@@ -768,7 +1031,7 @@ func (s *IntegrationSuite) TestXHRNoRedirect(c *check.C) {
c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
}
-func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, hostPath, queryString string, reqHeader http.Header, reqBody string, expectStatus int, expectRespBody string) *httptest.ResponseRecorder {
+func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, hostPath, queryString string, reqHeader http.Header, reqBody string, expectStatus int, matchRespBody string) *httptest.ResponseRecorder {
if reqHeader == nil {
reqHeader = http.Header{}
}
@@ -786,25 +1049,41 @@ func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, ho
resp := httptest.NewRecorder()
defer func() {
c.Check(resp.Code, check.Equals, expectStatus)
- c.Check(resp.Body.String(), check.Equals, expectRespBody)
+ c.Check(resp.Body.String(), check.Matches, matchRespBody)
}()
s.handler.ServeHTTP(resp, req)
if resp.Code != http.StatusSeeOther {
+ attachment, _ := regexp.MatchString(`^attachment(;|$)`, resp.Header().Get("Content-Disposition"))
+ // Since we're not redirecting, check that any api_token in the URL is
+ // handled safely.
+ // If there is no token in the URL, then we're good.
+ // Otherwise, if the response code is an error, the body is expected to
+ // be static content, and nothing that might maliciously introspect the
+ // URL. It's considered safe and allowed.
+ // Otherwise, if the response content has attachment disposition,
+ // that's considered safe for all the reasons explained in the
+ // safeAttachment comment in handler.go.
+ c.Check(!u.Query().Has("api_token") || resp.Code >= 400 || attachment, check.Equals, true)
return resp
}
+
+ loc, err := url.Parse(resp.Header().Get("Location"))
+ c.Assert(err, check.IsNil)
+ c.Check(loc.Scheme, check.Equals, u.Scheme)
+ c.Check(loc.Host, check.Equals, u.Host)
+ c.Check(loc.RawPath, check.Equals, u.RawPath)
+ // If the response was a redirect, it should never include an API token.
+ c.Check(loc.Query().Has("api_token"), check.Equals, false)
c.Check(resp.Body.String(), check.Matches, `.*href="http://`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
- c.Check(strings.Split(resp.Header().Get("Location"), "?")[0], check.Equals, "http://"+hostPath)
cookies := (&http.Response{Header: resp.Header()}).Cookies()
- u, err := u.Parse(resp.Header().Get("Location"))
- c.Assert(err, check.IsNil)
c.Logf("following redirect to %s", u)
req = &http.Request{
Method: "GET",
- Host: u.Host,
- URL: u,
- RequestURI: u.RequestURI(),
+ Host: loc.Host,
+ URL: loc,
+ RequestURI: loc.RequestURI(),
Header: reqHeader,
}
for _, c := range cookies {
@@ -831,6 +1110,17 @@ func (s *IntegrationSuite) TestDirectoryListingWithNoAnonymousToken(c *check.C)
}
func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
+ // The "ownership cycle" test fixtures are reachable from the
+ // "filter group without filters" group, causing webdav's
+ // walkfs to recurse indefinitely. Avoid that by deleting one
+ // of the bogus fixtures.
+ arv := arvados.NewClientFromEnv()
+ err := arv.RequestAndDecode(nil, "DELETE", "arvados/v1/groups/zzzzz-j7d0g-cx2al9cqkmsf1hs", nil, nil)
+ if err != nil {
+ c.Assert(err, check.FitsTypeOf, &arvados.TransactionError{})
+ c.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 404)
+ }
+
s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host = "download.example.com"
authHeader := http.Header{
"Authorization": {"OAuth2 " + arvadostest.ActiveToken},
@@ -967,8 +1257,32 @@ func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
expect: []string{"waz"},
cutDirs: 2,
},
+ {
+ uri: "download.example.com/users/active/This filter group/",
+ header: authHeader,
+ expect: []string{"A Subproject/"},
+ cutDirs: 3,
+ },
+ {
+ uri: "download.example.com/users/active/This filter group/A Subproject",
+ header: authHeader,
+ expect: []string{"baz_file/"},
+ cutDirs: 4,
+ },
+ {
+ uri: "download.example.com/by_id/" + arvadostest.AFilterGroupUUID,
+ header: authHeader,
+ expect: []string{"A Subproject/"},
+ cutDirs: 2,
+ },
+ {
+ uri: "download.example.com/by_id/" + arvadostest.AFilterGroupUUID + "/A Subproject",
+ header: authHeader,
+ expect: []string{"baz_file/"},
+ cutDirs: 3,
+ },
} {
- comment := check.Commentf("HTML: %q => %q", trial.uri, trial.expect)
+ comment := check.Commentf("HTML: %q redir %q => %q", trial.uri, trial.redirect, trial.expect)
resp := httptest.NewRecorder()
u := mustParseURL("//" + trial.uri)
req := &http.Request{
@@ -1000,14 +1314,11 @@ func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
c.Check(req.URL.Path, check.Equals, trial.redirect, comment)
}
if trial.expect == nil {
- if s.handler.Cluster.Users.AnonymousUserToken == "" {
- c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
- } else {
- c.Check(resp.Code, check.Equals, http.StatusNotFound, comment)
- }
+ c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
} else {
c.Check(resp.Code, check.Equals, http.StatusOK, comment)
for _, e := range trial.expect {
+ e = strings.Replace(e, " ", "%20", -1)
c.Check(resp.Body.String(), check.Matches, `(?ms).*href="./`+e+`".*`, comment)
}
c.Check(resp.Body.String(), check.Matches, `(?ms).*--cut-dirs=`+fmt.Sprintf("%d", trial.cutDirs)+` .*`, comment)
@@ -1025,11 +1336,7 @@ func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
resp = httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
if trial.expect == nil {
- if s.handler.Cluster.Users.AnonymousUserToken == "" {
- c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
- } else {
- c.Check(resp.Code, check.Equals, http.StatusNotFound, comment)
- }
+ c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
} else {
c.Check(resp.Code, check.Equals, http.StatusOK, comment)
}
@@ -1044,12 +1351,14 @@ func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
}
resp = httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
+ // This check avoids logging a big XML document in the
+ // event webdav throws a 500 error after sending
+ // headers for a 207.
+ if !c.Check(strings.HasSuffix(resp.Body.String(), "Internal Server Error"), check.Equals, false) {
+ continue
+ }
if trial.expect == nil {
- if s.handler.Cluster.Users.AnonymousUserToken == "" {
- c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
- } else {
- c.Check(resp.Code, check.Equals, http.StatusNotFound, comment)
- }
+ c.Check(resp.Code, check.Equals, http.StatusUnauthorized, comment)
} else {
c.Check(resp.Code, check.Equals, http.StatusMultiStatus, comment)
for _, e := range trial.expect {
@@ -1058,6 +1367,7 @@ func (s *IntegrationSuite) testDirectoryListing(c *check.C) {
} else {
e = filepath.Join(u.Path, e)
}
+ e = strings.Replace(e, " ", "%20", -1)
c.Check(resp.Body.String(), check.Matches, `(?ms).*`+e+` .*`, comment)
}
}
@@ -1164,20 +1474,14 @@ func (s *IntegrationSuite) TestFileContentType(c *check.C) {
}
}
-func (s *IntegrationSuite) TestKeepClientBlockCache(c *check.C) {
- s.handler.Cluster.Collections.WebDAVCache.MaxBlockEntries = 42
- c.Check(keepclient.DefaultBlockCache.MaxBlocks, check.Not(check.Equals), 42)
- u := mustParseURL("http://keep-web.example/c=" + arvadostest.FooCollection + "/t=" + arvadostest.ActiveToken + "/foo")
- req := &http.Request{
- Method: "GET",
- Host: u.Host,
- URL: u,
- RequestURI: u.RequestURI(),
- }
+func (s *IntegrationSuite) TestCacheSize(c *check.C) {
+ req, err := http.NewRequest("GET", "http://"+arvadostest.FooCollection+".example.com/foo", nil)
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+ c.Assert(err, check.IsNil)
resp := httptest.NewRecorder()
s.handler.ServeHTTP(resp, req)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(keepclient.DefaultBlockCache.MaxBlocks, check.Equals, 42)
+ c.Assert(resp.Code, check.Equals, http.StatusOK)
+ c.Check(s.handler.Cache.sessions[arvadostest.ActiveTokenV2].client.DiskCacheSize.Percent(), check.Equals, int64(10))
}
// Writing to a collection shouldn't affect its entry in the
@@ -1245,7 +1549,7 @@ func copyHeader(h http.Header) http.Header {
}
func (s *IntegrationSuite) checkUploadDownloadRequest(c *check.C, req *http.Request,
- successCode int, direction string, perm bool, userUuid string, collectionUuid string, filepath string) {
+ successCode int, direction string, perm bool, userUuid, collectionUuid, collectionPDH, filepath string) {
client := arvados.NewClientFromEnv()
client.AuthToken = arvadostest.AdminToken
@@ -1258,6 +1562,7 @@ func (s *IntegrationSuite) checkUploadDownloadRequest(c *check.C, req *http.Requ
c.Check(err, check.IsNil)
c.Check(logentries.Items, check.HasLen, 1)
lastLogId := logentries.Items[0].ID
+ c.Logf("lastLogId: %d", lastLogId)
var logbuf bytes.Buffer
logger := logrus.New()
@@ -1274,6 +1579,7 @@ func (s *IntegrationSuite) checkUploadDownloadRequest(c *check.C, req *http.Requ
deadline := time.Now().Add(time.Second)
for {
c.Assert(time.Now().After(deadline), check.Equals, false, check.Commentf("timed out waiting for log entry"))
+ logentries = arvados.LogList{}
err = client.RequestAndDecode(&logentries, "GET", "arvados/v1/logs", nil,
arvados.ResourceListParams{
Filters: []arvados.Filter{
@@ -1288,6 +1594,7 @@ func (s *IntegrationSuite) checkUploadDownloadRequest(c *check.C, req *http.Requ
logentries.Items[0].ID > lastLogId &&
logentries.Items[0].ObjectUUID == userUuid &&
logentries.Items[0].Properties["collection_uuid"] == collectionUuid &&
+ (collectionPDH == "" || logentries.Items[0].Properties["portable_data_hash"] == collectionPDH) &&
logentries.Items[0].Properties["collection_file_path"] == filepath {
break
}
@@ -1321,7 +1628,7 @@ func (s *IntegrationSuite) TestDownloadLoggingPermission(c *check.C) {
},
}
s.checkUploadDownloadRequest(c, req, http.StatusOK, "download", adminperm,
- arvadostest.AdminUserUUID, arvadostest.FooCollection, "foo")
+ arvadostest.AdminUserUUID, arvadostest.FooCollection, arvadostest.FooCollectionPDH, "foo")
// Test user permission
req = &http.Request{
@@ -1334,7 +1641,7 @@ func (s *IntegrationSuite) TestDownloadLoggingPermission(c *check.C) {
},
}
s.checkUploadDownloadRequest(c, req, http.StatusOK, "download", userperm,
- arvadostest.ActiveUserUUID, arvadostest.FooCollection, "foo")
+ arvadostest.ActiveUserUUID, arvadostest.FooCollection, arvadostest.FooCollectionPDH, "foo")
}
}
@@ -1354,7 +1661,7 @@ func (s *IntegrationSuite) TestDownloadLoggingPermission(c *check.C) {
},
}
s.checkUploadDownloadRequest(c, req, http.StatusOK, "download", true,
- arvadostest.ActiveUserUUID, arvadostest.MultilevelCollection1, "dir1/subdir/file1")
+ arvadostest.ActiveUserUUID, arvadostest.MultilevelCollection1, arvadostest.MultilevelCollection1PDH, "dir1/subdir/file1")
}
u = mustParseURL("http://" + strings.Replace(arvadostest.FooCollectionPDH, "+", "-", 1) + ".keep-web.example/foo")
@@ -1368,7 +1675,7 @@ func (s *IntegrationSuite) TestDownloadLoggingPermission(c *check.C) {
},
}
s.checkUploadDownloadRequest(c, req, http.StatusOK, "download", true,
- arvadostest.ActiveUserUUID, arvadostest.FooCollection, "foo")
+ arvadostest.ActiveUserUUID, "", arvadostest.FooCollectionPDH, "foo")
}
func (s *IntegrationSuite) TestUploadLoggingPermission(c *check.C) {
@@ -1408,7 +1715,7 @@ func (s *IntegrationSuite) TestUploadLoggingPermission(c *check.C) {
Body: io.NopCloser(bytes.NewReader([]byte("bar"))),
}
s.checkUploadDownloadRequest(c, req, http.StatusCreated, "upload", adminperm,
- arvadostest.AdminUserUUID, coll.UUID, "bar")
+ arvadostest.AdminUserUUID, coll.UUID, "", "bar")
// Test user permission
req = &http.Request{
@@ -1422,7 +1729,76 @@ func (s *IntegrationSuite) TestUploadLoggingPermission(c *check.C) {
Body: io.NopCloser(bytes.NewReader([]byte("bar"))),
}
s.checkUploadDownloadRequest(c, req, http.StatusCreated, "upload", userperm,
- arvadostest.ActiveUserUUID, coll.UUID, "bar")
+ arvadostest.ActiveUserUUID, coll.UUID, "", "bar")
+ }
+ }
+}
+
+func (s *IntegrationSuite) TestConcurrentWrites(c *check.C) {
+ s.handler.Cluster.Collections.WebDAVCache.TTL = arvados.Duration(time.Second * 2)
+ lockTidyInterval = time.Second
+ client := arvados.NewClientFromEnv()
+ client.AuthToken = arvadostest.ActiveTokenV2
+ // Start small, and increase concurrency (2^2, 4^2, ...)
+ // only until hitting failure. Avoids unnecessarily long
+ // failure reports.
+ for n := 2; n < 16 && !c.Failed(); n = n * 2 {
+ c.Logf("%s: n=%d", c.TestName(), n)
+
+ var coll arvados.Collection
+ err := client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, nil)
+ c.Assert(err, check.IsNil)
+ defer client.RequestAndDecode(&coll, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil)
+
+ var wg sync.WaitGroup
+ for i := 0; i < n && !c.Failed(); i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ u := mustParseURL(fmt.Sprintf("http://%s.collections.example.com/i=%d", coll.UUID, i))
+ resp := httptest.NewRecorder()
+ req, err := http.NewRequest("MKCOL", u.String(), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ s.handler.ServeHTTP(resp, req)
+ c.Assert(resp.Code, check.Equals, http.StatusCreated)
+ for j := 0; j < n && !c.Failed(); j++ {
+ j := j
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ content := fmt.Sprintf("i=%d/j=%d", i, j)
+ u := mustParseURL("http://" + coll.UUID + ".collections.example.com/" + content)
+
+ resp := httptest.NewRecorder()
+ req, err := http.NewRequest("PUT", u.String(), strings.NewReader(content))
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusCreated)
+
+ time.Sleep(time.Second)
+ resp = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", u.String(), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ s.handler.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ c.Check(resp.Body.String(), check.Equals, content)
+ }()
+ }
+ }()
+ }
+ wg.Wait()
+ for i := 0; i < n; i++ {
+ u := mustParseURL(fmt.Sprintf("http://%s.collections.example.com/i=%d", coll.UUID, i))
+ resp := httptest.NewRecorder()
+ req, err := http.NewRequest("PROPFIND", u.String(), &bytes.Buffer{})
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer "+client.AuthToken)
+ s.handler.ServeHTTP(resp, req)
+ c.Assert(resp.Code, check.Equals, http.StatusMultiStatus)
}
}
}
diff --git a/services/keep-web/main.go b/services/keep-web/main.go
index 7a23cd1fad..690e75a251 100644
--- a/services/keep-web/main.go
+++ b/services/keep-web/main.go
@@ -16,10 +16,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-var (
- version = "dev"
-)
-
var Command = service.Command(arvados.ServiceNameKeepweb, newHandlerOrErrorHandler)
func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
@@ -45,5 +41,6 @@ func newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg
logger: logger,
registry: reg,
},
+ metrics: newMetrics(reg),
}, nil
}
diff --git a/services/keep-web/metrics.go b/services/keep-web/metrics.go
new file mode 100644
index 0000000000..b989988dd5
--- /dev/null
+++ b/services/keep-web/metrics.go
@@ -0,0 +1,155 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepweb
+
+import (
+ "io"
+ "math"
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type metrics struct {
+ mDownloadSpeed *prometheus.HistogramVec
+ mDownloadBackendSpeed *prometheus.HistogramVec
+ mUploadSpeed *prometheus.HistogramVec
+ mUploadSyncDelay *prometheus.HistogramVec
+}
+
+func newMetrics(reg *prometheus.Registry) *metrics {
+ m := &metrics{
+ mDownloadSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "arvados",
+ Subsystem: "keepweb",
+ Name: "download_speed",
+ Help: "Download speed (bytes per second) bucketed by transfer size range",
+ Buckets: []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
+ }, []string{"size_range"}),
+ mDownloadBackendSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "arvados",
+ Subsystem: "keepweb",
+ Name: "download_apparent_backend_speed",
+ Help: "Apparent download speed from the backend (bytes per second) when serving file downloads, bucketed by transfer size range (see https://dev.arvados.org/projects/arvados/wiki/WebDAV_performance_metrics for explanation)",
+ Buckets: []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
+ }, []string{"size_range"}),
+ mUploadSpeed: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "arvados",
+ Subsystem: "keepweb",
+ Name: "upload_speed",
+ Help: "Upload speed (bytes per second) bucketed by transfer size range",
+ Buckets: []float64{10_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, math.Inf(+1)},
+ }, []string{"size_range"}),
+ mUploadSyncDelay: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "arvados",
+ Subsystem: "keepweb",
+ Name: "upload_sync_delay_seconds",
+ Help: "Upload sync delay (time from last byte received to HTTP response)",
+ }, []string{"size_range"}),
+ }
+ reg.MustRegister(m.mDownloadSpeed)
+ reg.MustRegister(m.mDownloadBackendSpeed)
+ reg.MustRegister(m.mUploadSpeed)
+ reg.MustRegister(m.mUploadSyncDelay)
+ return m
+}
+
+// run handler(w,r) and record upload/download metrics as applicable.
+func (m *metrics) track(handler http.Handler, w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case http.MethodGet:
+ dt := newDownloadTracker(w)
+ handler.ServeHTTP(dt, r)
+ size := dt.bytesOut
+ if size == 0 {
+ return
+ }
+ bucket := sizeRange(size)
+ m.mDownloadSpeed.WithLabelValues(bucket).Observe(float64(dt.bytesOut) / time.Since(dt.t0).Seconds())
+ m.mDownloadBackendSpeed.WithLabelValues(bucket).Observe(float64(size) / (dt.backendWait + time.Since(dt.lastByte)).Seconds())
+ case http.MethodPut:
+ ut := newUploadTracker(r)
+ handler.ServeHTTP(w, r)
+ d := ut.lastByte.Sub(ut.t0)
+ if d <= 0 {
+ // Read() was not called, or did not return
+ // any data
+ return
+ }
+ size := ut.bytesIn
+ bucket := sizeRange(size)
+ m.mUploadSpeed.WithLabelValues(bucket).Observe(float64(ut.bytesIn) / d.Seconds())
+ m.mUploadSyncDelay.WithLabelValues(bucket).Observe(time.Since(ut.lastByte).Seconds())
+ default:
+ handler.ServeHTTP(w, r)
+ }
+}
+
+// Assign a sizeRange based on number of bytes transferred (not the
+// same as file size in the case of a Range request or interrupted
+// transfer).
+func sizeRange(size int64) string {
+ switch {
+ case size < 1_000_000:
+ return "0"
+ case size < 10_000_000:
+ return "1M"
+ case size < 100_000_000:
+ return "10M"
+ default:
+ return "100M"
+ }
+}
+
+type downloadTracker struct {
+ http.ResponseWriter
+ t0 time.Time
+
+ firstByte time.Time // time of first call to Write
+ lastByte time.Time // time of most recent call to Write
+ bytesOut int64 // bytes sent to client so far
+ backendWait time.Duration // total of intervals between Write calls
+}
+
+func newDownloadTracker(w http.ResponseWriter) *downloadTracker {
+ return &downloadTracker{ResponseWriter: w, t0: time.Now()}
+}
+
+func (dt *downloadTracker) Write(p []byte) (int, error) {
+ if dt.lastByte.IsZero() {
+ dt.backendWait += time.Since(dt.t0)
+ } else {
+ dt.backendWait += time.Since(dt.lastByte)
+ }
+ if dt.firstByte.IsZero() {
+ dt.firstByte = time.Now()
+ }
+ n, err := dt.ResponseWriter.Write(p)
+ dt.bytesOut += int64(n)
+ dt.lastByte = time.Now()
+ return n, err
+}
+
+type uploadTracker struct {
+ io.ReadCloser
+ t0 time.Time
+ lastByte time.Time
+ bytesIn int64
+}
+
+func newUploadTracker(r *http.Request) *uploadTracker {
+ now := time.Now()
+ ut := &uploadTracker{ReadCloser: r.Body, t0: now}
+ r.Body = ut
+ return ut
+}
+
+func (ut *uploadTracker) Read(p []byte) (int, error) {
+ n, err := ut.ReadCloser.Read(p)
+ ut.lastByte = time.Now()
+ ut.bytesIn += int64(n)
+ return n, err
+}
diff --git a/services/keep-web/s3.go b/services/keep-web/s3.go
index 1f458f8e59..3e60f3006d 100644
--- a/services/keep-web/s3.go
+++ b/services/keep-web/s3.go
@@ -27,10 +27,7 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
- "github.com/AdRoll/goamz/s3"
)
const (
@@ -44,11 +41,17 @@ type commonPrefix struct {
}
type listV1Resp struct {
- XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
- s3.ListResp
- // s3.ListResp marshals an empty tag when
- // CommonPrefixes is nil, which confuses some clients.
- // Fix by using this nested struct instead.
+ XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
+ Name string
+ Prefix string
+ Delimiter string
+ Marker string
+ MaxKeys int
+ IsTruncated bool
+ Contents []s3Key
+ // If we use a []string here, xml marshals an empty tag when
+ // CommonPrefixes is nil, which confuses some clients. Fix by
+ // using this nested struct instead.
CommonPrefixes []commonPrefix
// Similarly, we need omitempty here, because an empty
// tag confuses some clients (e.g.,
@@ -62,7 +65,7 @@ type listV1Resp struct {
type listV2Resp struct {
XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
IsTruncated bool
- Contents []s3.Key
+ Contents []s3Key
Name string
Prefix string
Delimiter string
@@ -75,6 +78,21 @@ type listV2Resp struct {
StartAfter string `xml:",omitempty"`
}
+type s3Key struct {
+ Key string
+ LastModified string
+ Size int64
+ // The following fields are not populated, but are here in
+ // case clients rely on the keys being present in xml
+ // responses.
+ ETag string
+ StorageClass string
+ Owner struct {
+ ID string
+ DisplayName string
+ }
+}
+
func hmacstring(msg string, key []byte) []byte {
h := hmac.New(sha256.New, key)
io.WriteString(h, msg)
@@ -312,33 +330,19 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
return false
}
- var err error
- var fs arvados.CustomFileSystem
- var arvclient *arvadosclient.ArvadosClient
- if r.Method == http.MethodGet || r.Method == http.MethodHead {
- // Use a single session (cached FileSystem) across
- // multiple read requests.
- var sess *cachedSession
- fs, sess, err = h.Cache.GetSession(token)
- if err != nil {
- s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
- return true
- }
- arvclient = sess.arvadosclient
- } else {
+ fs, sess, tokenUser, err := h.Cache.GetSession(token)
+ if err != nil {
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
+ return true
+ }
+ defer sess.Release()
+ readfs := fs
+ if writeMethod[r.Method] {
// Create a FileSystem for this request, to avoid
// exposing incomplete write operations to concurrent
// requests.
- var kc *keepclient.KeepClient
- var release func()
- var client *arvados.Client
- arvclient, kc, client, release, err = h.getClients(r.Header.Get("X-Request-Id"), token)
- if err != nil {
- s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
- return true
- }
- defer release()
- fs = client.SiteFileSystem(kc)
+ client := sess.client.WithRequestID(r.Header.Get("X-Request-Id"))
+ fs = client.SiteFileSystem(sess.keepclient)
fs.ForwardSlashNameSubstitution(h.Cluster.Collections.ForwardSlashNameSubstitution)
}
@@ -418,12 +422,11 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
return true
}
- tokenUser, err := h.Cache.GetTokenUser(token)
if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
http.Error(w, "Not permitted", http.StatusForbidden)
return true
}
- h.logUploadOrDownload(r, arvclient, fs, fspath, nil, tokenUser)
+ h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)
// shallow copy r, and change URL path
r := *r
@@ -514,12 +517,11 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
}
defer f.Close()
- tokenUser, err := h.Cache.GetTokenUser(token)
if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
http.Error(w, "Not permitted", http.StatusForbidden)
return true
}
- h.logUploadOrDownload(r, arvclient, fs, fspath, nil, tokenUser)
+ h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)
_, err = io.Copy(f, r.Body)
if err != nil {
@@ -534,14 +536,12 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
return true
}
}
- err = fs.Sync()
+ err = h.syncCollection(fs, readfs, fspath)
if err != nil {
err = fmt.Errorf("sync failed: %w", err)
s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
}
- // Ensure a subsequent read operation will see the changes.
- h.Cache.ResetSession(token)
w.WriteHeader(http.StatusOK)
return true
case r.Method == http.MethodDelete:
@@ -588,14 +588,12 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
return true
}
- err = fs.Sync()
+ err = h.syncCollection(fs, readfs, fspath)
if err != nil {
err = fmt.Errorf("sync failed: %w", err)
s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
}
- // Ensure a subsequent read operation will see the changes.
- h.Cache.ResetSession(token)
w.WriteHeader(http.StatusNoContent)
return true
default:
@@ -604,6 +602,34 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
}
}
+// Save modifications to the indicated collection in srcfs, then (if
+// successful) ensure they are also reflected in dstfs.
+func (h *handler) syncCollection(srcfs, dstfs arvados.CustomFileSystem, path string) error {
+ coll, _ := h.determineCollection(srcfs, path)
+ if coll == nil || coll.UUID == "" {
+ return errors.New("could not determine collection to sync")
+ }
+ d, err := srcfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+ err = d.Sync()
+ if err != nil {
+ return err
+ }
+ snap, err := d.Snapshot()
+ if err != nil {
+ return err
+ }
+ dstd, err := dstfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
+ if err != nil {
+ return err
+ }
+ defer dstd.Close()
+ return dstd.Splice(snap)
+}
+
func setFileInfoHeaders(header http.Header, fs arvados.CustomFileSystem, path string) error {
maybeEncode := func(s string) string {
for _, c := range s {
@@ -747,6 +773,9 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
http.Error(w, "invalid continuation token", http.StatusBadRequest)
return
}
+ // marker and start-after perform the same function,
+ // but we keep them separate so we can repeat them
+ // back to the client in the response.
params.marker = string(marker)
params.startAfter = r.FormValue("start-after")
switch r.FormValue("encoding-type") {
@@ -758,9 +787,17 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
return
}
} else {
+ // marker is functionally equivalent to start-after.
params.marker = r.FormValue("marker")
}
+	// startAfter is params.marker or params.startAfter,
+	// whichever sorts last (i.e., the lexicographically greater).
+ startAfter := params.startAfter
+ if startAfter < params.marker {
+ startAfter = params.marker
+ }
+
bucketdir := "by_id/" + bucket
// walkpath is the directory (relative to bucketdir) we need
// to walk: the innermost directory that is guaranteed to
@@ -784,9 +821,15 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
ContinuationToken: r.FormValue("continuation-token"),
StartAfter: params.startAfter,
}
+
+ // nextMarker will be the last path we add to either
+ // resp.Contents or commonPrefixes. It will be included in
+ // the response as NextMarker or NextContinuationToken if
+ // needed.
nextMarker := ""
commonPrefixes := map[string]bool{}
+ full := false
err := walkFS(fs, strings.TrimSuffix(bucketdir+"/"+walkpath, "/"), true, func(path string, fi os.FileInfo) error {
if path == bucketdir {
return nil
@@ -797,36 +840,29 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
path += "/"
filesize = 0
}
- if len(path) <= len(params.prefix) {
- if path > params.prefix[:len(path)] {
- // with prefix "foobar", walking "fooz" means we're done
- return errDone
- }
- if path < params.prefix[:len(path)] {
- // with prefix "foobar", walking "foobag" is pointless
- return filepath.SkipDir
- }
- if fi.IsDir() && !strings.HasPrefix(params.prefix+"/", path) {
- // with prefix "foo/bar", walking "fo"
- // is pointless (but walking "foo" or
- // "foo/bar" is necessary)
- return filepath.SkipDir
- }
- if len(path) < len(params.prefix) {
- // can't skip anything, and this entry
- // isn't in the results, so just
- // continue descent
- return nil
- }
- } else {
- if path[:len(params.prefix)] > params.prefix {
- // with prefix "foobar", nothing we
- // see after "foozzz" is relevant
- return errDone
- }
- }
- if path < params.marker || path < params.prefix || path <= params.startAfter {
+ if strings.HasPrefix(params.prefix, path) && params.prefix != path {
+ // Descend into subtree until we reach desired prefix
+ return nil
+ } else if path < params.prefix {
+ // Not an ancestor or descendant of desired
+ // prefix, therefore none of its descendants
+ // can be either -- skip
+ return filepath.SkipDir
+ } else if path > params.prefix && !strings.HasPrefix(path, params.prefix) {
+ // We must have traversed everything under
+ // desired prefix
+ return errDone
+ } else if path == startAfter {
+ // Skip startAfter itself, just descend into
+ // subtree
+ return nil
+ } else if strings.HasPrefix(startAfter, path) {
+ // Descend into subtree in case it contains
+ // something after startAfter
return nil
+ } else if path < startAfter {
+ // Skip ahead until we reach startAfter
+ return filepath.SkipDir
}
if fi.IsDir() && !h.Cluster.Collections.S3FolderObjects {
// Note we don't add anything to
@@ -836,13 +872,6 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
// finding a regular file inside it.
return nil
}
- if len(resp.Contents)+len(commonPrefixes) >= params.maxKeys {
- resp.IsTruncated = true
- if params.delimiter != "" || params.v2 {
- nextMarker = path
- }
- return errDone
- }
if params.delimiter != "" {
idx := strings.Index(path[len(params.prefix):], params.delimiter)
if idx >= 0 {
@@ -850,21 +879,42 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
// "z", when we hit "foobar/baz", we
// add "/baz" to commonPrefixes and
// stop descending.
- commonPrefixes[path[:len(params.prefix)+idx+1]] = true
- return filepath.SkipDir
+ prefix := path[:len(params.prefix)+idx+1]
+ if prefix == startAfter {
+ return nil
+ } else if prefix < startAfter && !strings.HasPrefix(startAfter, prefix) {
+ return nil
+ } else if full {
+ resp.IsTruncated = true
+ return errDone
+ } else {
+ commonPrefixes[prefix] = true
+ nextMarker = prefix
+ full = len(resp.Contents)+len(commonPrefixes) >= params.maxKeys
+ return filepath.SkipDir
+ }
}
}
- resp.Contents = append(resp.Contents, s3.Key{
+ if full {
+ resp.IsTruncated = true
+ return errDone
+ }
+ resp.Contents = append(resp.Contents, s3Key{
Key: path,
LastModified: fi.ModTime().UTC().Format("2006-01-02T15:04:05.999") + "Z",
Size: filesize,
})
+ nextMarker = path
+ full = len(resp.Contents)+len(commonPrefixes) >= params.maxKeys
return nil
})
if err != nil && err != errDone {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
+ if params.delimiter == "" && !params.v2 || !resp.IsTruncated {
+ nextMarker = ""
+ }
if params.delimiter != "" {
resp.CommonPrefixes = make([]commonPrefix, 0, len(commonPrefixes))
for prefix := range commonPrefixes {
@@ -918,15 +968,13 @@ func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request,
CommonPrefixes: resp.CommonPrefixes,
NextMarker: nextMarker,
KeyCount: resp.KeyCount,
- ListResp: s3.ListResp{
- IsTruncated: resp.IsTruncated,
- Name: bucket,
- Prefix: params.prefix,
- Delimiter: params.delimiter,
- Marker: params.marker,
- MaxKeys: params.maxKeys,
- Contents: resp.Contents,
- },
+ IsTruncated: resp.IsTruncated,
+ Name: bucket,
+ Prefix: params.prefix,
+ Delimiter: params.delimiter,
+ Marker: params.marker,
+ MaxKeys: params.maxKeys,
+ Contents: resp.Contents,
}
}
diff --git a/services/keep-web/s3_test.go b/services/keep-web/s3_test.go
index 851bee4b72..79b3712c6b 100644
--- a/services/keep-web/s3_test.go
+++ b/services/keep-web/s3_test.go
@@ -17,6 +17,7 @@ import (
"net/url"
"os"
"os/exec"
+ "sort"
"strings"
"sync"
"time"
@@ -316,14 +317,19 @@ func (s *IntegrationSuite) TestS3PropertiesAsMetadata(c *check.C) {
func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
stage := s.s3setup(c)
defer stage.teardown(c)
- s.testS3PutObjectSuccess(c, stage.collbucket, "")
+ s.testS3PutObjectSuccess(c, stage.collbucket, "", stage.coll.UUID)
}
func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
stage := s.s3setup(c)
defer stage.teardown(c)
- s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
+ s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/", stage.coll.UUID)
}
-func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
+func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string, collUUID string) {
+ // We insert a delay between test cases to ensure we exercise
+ // rollover of expired sessions.
+ sleep := time.Second / 100
+ s.handler.Cluster.Collections.WebDAVCache.TTL = arvados.Duration(sleep * 3)
+
for _, trial := range []struct {
path string
size int
@@ -359,6 +365,7 @@ func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket,
contentType: "application/x-directory",
},
} {
+ time.Sleep(sleep)
c.Logf("=== %v", trial)
objname := prefix + trial.path
@@ -367,7 +374,7 @@ func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket,
if !c.Check(err, check.NotNil) {
continue
}
- c.Check(err.(*s3.Error).StatusCode, check.Equals, 404)
+ c.Check(err.(*s3.Error).StatusCode, check.Equals, http.StatusNotFound)
c.Check(err.(*s3.Error).Code, check.Equals, `NoSuchKey`)
if !c.Check(err, check.ErrorMatches, `The specified key does not exist.`) {
continue
@@ -390,6 +397,14 @@ func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket,
c.Check(err, check.IsNil)
c.Check(buf2, check.HasLen, len(buf))
c.Check(bytes.Equal(buf, buf2), check.Equals, true)
+
+ // Check that the change is immediately visible via
+ // (non-S3) webdav request.
+ _, resp := s.do("GET", "http://"+collUUID+".keep-web.example/"+trial.path, arvadostest.ActiveTokenV2, nil)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ if !strings.HasSuffix(trial.path, "/") {
+ c.Check(resp.Body.Len(), check.Equals, trial.size)
+ }
}
}
@@ -809,8 +824,8 @@ func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
var markers int
for markers, s.handler.Cluster.Collections.S3FolderObjects = range []bool{false, true} {
- dirs := 2
- filesPerDir := 1001
+ dirs := 2000
+ filesPerDir := 2
stage.writeBigDirs(c, dirs, filesPerDir)
// Total # objects is:
// 2 file entries from s3setup (emptyfile and sailboat.txt)
@@ -819,6 +834,7 @@ func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
// +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
+ s.testS3List(c, stage.collbucket, "", 51, markers+2+(filesPerDir+markers)*dirs)
s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
}
}
@@ -841,6 +857,9 @@ func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix stri
break
}
for _, key := range resp.Contents {
+ if _, dup := gotKeys[key.Key]; dup {
+ c.Errorf("got duplicate key %q on page %d", key.Key, pages)
+ }
gotKeys[key.Key] = key
if strings.Contains(key.Key, "sailboat.txt") {
c.Check(key.Size, check.Equals, int64(4))
@@ -855,7 +874,16 @@ func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix stri
}
nextMarker = resp.NextMarker
}
- c.Check(len(gotKeys), check.Equals, expectFiles)
+ if !c.Check(len(gotKeys), check.Equals, expectFiles) {
+ var sorted []string
+ for k := range gotKeys {
+ sorted = append(sorted, k)
+ }
+ sort.Strings(sorted)
+ for _, k := range sorted {
+ c.Logf("got %s", k)
+ }
+ }
}
func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
@@ -921,7 +949,8 @@ func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
{"dir0", "", ""},
{"dir0/", "", ""},
{"dir0/f", "", ""},
- {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
+ {"dir0", "/", "dir0/file14.txt"}, // one commonprefix, "dir0/"
+ {"dir0", "/", "dir0/zzzzfile.txt"}, // no commonprefixes
{"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
{"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
{"", "", "dir1/file498.txt"}, // last page of results
@@ -952,28 +981,31 @@ func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
var expectTruncated bool
for _, key := range allfiles {
full := len(expectKeys)+len(expectPrefixes) >= maxKeys
- if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
+ if !strings.HasPrefix(key, trial.prefix) || key <= trial.marker {
continue
} else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
prefix := key[:len(trial.prefix)+idx+1]
if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
// same prefix as previous key
} else if full {
- expectNextMarker = key
expectTruncated = true
} else {
expectPrefixes = append(expectPrefixes, prefix)
+ expectNextMarker = prefix
}
} else if full {
- if trial.delimiter != "" {
- expectNextMarker = key
- }
expectTruncated = true
break
} else {
expectKeys = append(expectKeys, key)
+ if trial.delimiter != "" {
+ expectNextMarker = key
+ }
}
}
+ if !expectTruncated {
+ expectNextMarker = ""
+ }
var gotKeys []string
for _, key := range resp.Contents {
@@ -992,6 +1024,61 @@ func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
}
}
+func (s *IntegrationSuite) TestS3ListObjectsV2ManySubprojects(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ projects := 50
+ collectionsPerProject := 2
+ for i := 0; i < projects; i++ {
+ var subproj arvados.Group
+ err := stage.arv.RequestAndDecode(&subproj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
+ "group": map[string]interface{}{
+ "owner_uuid": stage.subproj.UUID,
+ "group_class": "project",
+ "name": fmt.Sprintf("keep-web s3 test subproject %d", i),
+ },
+ })
+ c.Assert(err, check.IsNil)
+ for j := 0; j < collectionsPerProject; j++ {
+ err = stage.arv.RequestAndDecode(nil, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
+ "owner_uuid": subproj.UUID,
+ "name": fmt.Sprintf("keep-web s3 test collection %d", j),
+ "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
+ }})
+ c.Assert(err, check.IsNil)
+ }
+ }
+ c.Logf("setup complete")
+
+ sess := aws_session.Must(aws_session.NewSession(&aws_aws.Config{
+ Region: aws_aws.String("auto"),
+ Endpoint: aws_aws.String(s.testServer.URL),
+ Credentials: aws_credentials.NewStaticCredentials(url.QueryEscape(arvadostest.ActiveTokenV2), url.QueryEscape(arvadostest.ActiveTokenV2), ""),
+ S3ForcePathStyle: aws_aws.Bool(true),
+ }))
+ client := aws_s3.New(sess)
+ ctx := context.Background()
+ params := aws_s3.ListObjectsV2Input{
+ Bucket: aws_aws.String(stage.proj.UUID),
+ Delimiter: aws_aws.String("/"),
+ Prefix: aws_aws.String("keep-web s3 test subproject/"),
+ MaxKeys: aws_aws.Int64(int64(projects / 2)),
+ }
+ for page := 1; ; page++ {
+ t0 := time.Now()
+ result, err := client.ListObjectsV2WithContext(ctx, ¶ms)
+ if !c.Check(err, check.IsNil) {
+ break
+ }
+ c.Logf("got page %d in %v with len(Contents) == %d, len(CommonPrefixes) == %d", page, time.Since(t0), len(result.Contents), len(result.CommonPrefixes))
+ if !*result.IsTruncated {
+ break
+ }
+ params.ContinuationToken = result.NextContinuationToken
+ *params.MaxKeys = *params.MaxKeys/2 + 1
+ }
+}
+
func (s *IntegrationSuite) TestS3ListObjectsV2(c *check.C) {
stage := s.s3setup(c)
defer stage.teardown(c)
@@ -1208,7 +1295,11 @@ func (s *IntegrationSuite) TestS3cmd(c *check.C) {
cmd = exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.URL[7:], "--host-bucket="+s.testServer.URL[7:], "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "get", "s3://"+arvadostest.FooCollection+"/foo,;$[|]bar", tmpfile)
buf, err = cmd.CombinedOutput()
c.Check(err, check.NotNil)
- c.Check(string(buf), check.Matches, `(?ms).*NoSuchKey.*\n`)
+ // As of commit b7520e5c25e1bf25c1a8bf5aa2eadb299be8f606
+ // (between debian bullseye and bookworm versions), s3cmd
+ // started catching the NoSuchKey error code and replacing it
+ // with "Source object '%s' does not exist.".
+ c.Check(string(buf), check.Matches, `(?ms).*(NoSuchKey|Source object.*does not exist).*\n`)
}
func (s *IntegrationSuite) TestS3BucketInHost(c *check.C) {
diff --git a/services/keep-web/server_test.go b/services/keep-web/server_test.go
index 61c540808b..0308f949f4 100644
--- a/services/keep-web/server_test.go
+++ b/services/keep-web/server_test.go
@@ -29,6 +29,7 @@ import (
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
+ "github.com/prometheus/client_golang/prometheus"
check "gopkg.in/check.v1"
)
@@ -47,19 +48,19 @@ func (s *IntegrationSuite) TestNoToken(c *check.C) {
"",
"bogustoken",
} {
- hdr, body, _ := s.runCurl(c, token, "collections.example.com", "/collections/"+arvadostest.FooCollection+"/foo")
- c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
- c.Check(body, check.Equals, notFoundMessage+"\n")
+ hdr, body, _ := s.runCurl(c, token, s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host, "/c="+arvadostest.FooCollection+"/foo")
+ c.Check(hdr, check.Matches, `(?s)HTTP/1.1 401 Unauthorized\r\n.*`)
+ c.Check(strings.TrimSpace(body), check.Equals, unauthorizedMessage)
if token != "" {
- hdr, body, _ = s.runCurl(c, token, "collections.example.com", "/collections/download/"+arvadostest.FooCollection+"/"+token+"/foo")
+ hdr, body, _ = s.runCurl(c, token, s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host, "/collections/download/"+arvadostest.FooCollection+"/"+token+"/foo")
c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
- c.Check(body, check.Equals, notFoundMessage+"\n")
+ c.Check(strings.TrimSpace(body), check.Equals, notFoundMessage)
}
- hdr, body, _ = s.runCurl(c, token, "collections.example.com", "/bad-route")
+ hdr, body, _ = s.runCurl(c, token, s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host, "/bad-route")
c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
- c.Check(body, check.Equals, notFoundMessage+"\n")
+ c.Check(strings.TrimSpace(body), check.Equals, notFoundMessage)
}
}
@@ -74,25 +75,21 @@ func (s *IntegrationSuite) Test404(c *check.C) {
"/download",
"/collections",
"/collections/",
- // Implicit/generated index is not implemented yet;
- // until then, return 404.
- "/collections/" + arvadostest.FooCollection,
- "/collections/" + arvadostest.FooCollection + "/",
- "/collections/" + arvadostest.FooBarDirCollection + "/dir1",
- "/collections/" + arvadostest.FooBarDirCollection + "/dir1/",
- // Non-existent file in collection
- "/collections/" + arvadostest.FooCollection + "/theperthcountyconspiracy",
+ // Non-existent file/directory
+ "/c=" + arvadostest.FooCollection + "/theperthcountyconspiracy",
+ "/c=" + arvadostest.FooCollection + "/theperthcountyconspiracy/",
"/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/theperthcountyconspiracy",
+ "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/theperthcountyconspiracy/",
// Non-existent collection
- "/collections/" + arvadostest.NonexistentCollection,
- "/collections/" + arvadostest.NonexistentCollection + "/",
- "/collections/" + arvadostest.NonexistentCollection + "/theperthcountyconspiracy",
+ "/c=" + arvadostest.NonexistentCollection,
+ "/c=" + arvadostest.NonexistentCollection + "/",
+ "/c=" + arvadostest.NonexistentCollection + "/theperthcountyconspiracy",
"/collections/download/" + arvadostest.NonexistentCollection + "/" + arvadostest.ActiveToken + "/theperthcountyconspiracy",
} {
- hdr, body, _ := s.runCurl(c, arvadostest.ActiveToken, "collections.example.com", uri)
+ hdr, body, _ := s.runCurl(c, arvadostest.ActiveToken, s.handler.Cluster.Services.WebDAVDownload.ExternalURL.Host, uri)
c.Check(hdr, check.Matches, "(?s)HTTP/1.1 404 Not Found\r\n.*")
if len(body) > 0 {
- c.Check(body, check.Equals, notFoundMessage+"\n")
+ c.Check(strings.TrimSpace(body), check.Equals, notFoundMessage)
}
}
}
@@ -263,10 +260,14 @@ func (s *IntegrationSuite) Test200(c *check.C) {
}
// Return header block and body.
-func (s *IntegrationSuite) runCurl(c *check.C, auth, host, uri string, args ...string) (hdr, bodyPart string, bodySize int64) {
+func (s *IntegrationSuite) runCurl(c *check.C, auth, hostport, uri string, args ...string) (hdr, bodyPart string, bodySize int64) {
curlArgs := []string{"--silent", "--show-error", "--include"}
testHost, testPort, _ := net.SplitHostPort(s.testServer.URL[7:])
- curlArgs = append(curlArgs, "--resolve", host+":"+testPort+":"+testHost)
+ host, port, _ := net.SplitHostPort(hostport)
+ if port == "" {
+ port = "80"
+ }
+ curlArgs = append(curlArgs, "--connect-to", host+":"+port+":"+testHost+":"+testPort)
if strings.Contains(auth, " ") {
// caller supplied entire Authorization header value
curlArgs = append(curlArgs, "-H", "Authorization: "+auth)
@@ -275,7 +276,7 @@ func (s *IntegrationSuite) runCurl(c *check.C, auth, host, uri string, args ...s
curlArgs = append(curlArgs, "-H", "Authorization: Bearer "+auth)
}
curlArgs = append(curlArgs, args...)
- curlArgs = append(curlArgs, "http://"+host+":"+testPort+uri)
+ curlArgs = append(curlArgs, "http://"+hostport+uri)
c.Log(fmt.Sprintf("curlArgs == %#v", curlArgs))
cmd := exec.Command("curl", curlArgs...)
stdout, err := cmd.StdoutPipe()
@@ -411,6 +412,24 @@ func (s *IntegrationSuite) TestMetrics(c *check.C) {
resp.Body.Close()
}
+ var coll arvados.Collection
+ arv, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, check.IsNil)
+ arv.ApiToken = arvadostest.ActiveTokenV2
+ err = arv.Create("collections", map[string]interface{}{"ensure_unique_name": true}, &coll)
+ c.Assert(err, check.IsNil)
+ defer arv.Delete("collections", coll.UUID, nil, nil)
+ for i := 0; i < 2; i++ {
+ size := 1 << (i * 12)
+ req, _ = http.NewRequest("PUT", srvaddr+"/zero-"+fmt.Sprintf("%d", size), bytes.NewReader(make([]byte, size)))
+ req.Host = coll.UUID + ".example.com"
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+ resp, err = http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusCreated)
+ resp.Body.Close()
+ }
+
time.Sleep(metricsUpdateInterval * 2)
req, _ = http.NewRequest("GET", srvaddr+"/metrics.json", nil)
@@ -475,15 +494,7 @@ func (s *IntegrationSuite) TestMetrics(c *check.C) {
c.Check(summaries["request_duration_seconds/get/200"].SampleCount, check.Equals, "3")
c.Check(summaries["request_duration_seconds/get/404"].SampleCount, check.Equals, "1")
c.Check(summaries["time_to_status_seconds/get/404"].SampleCount, check.Equals, "1")
- c.Check(counters["arvados_keepweb_collectioncache_requests//"].Value, check.Equals, int64(2))
- c.Check(counters["arvados_keepweb_collectioncache_api_calls//"].Value, check.Equals, int64(2))
- c.Check(counters["arvados_keepweb_collectioncache_hits//"].Value, check.Equals, int64(1))
- c.Check(counters["arvados_keepweb_collectioncache_pdh_hits//"].Value, check.Equals, int64(1))
- c.Check(gauges["arvados_keepweb_collectioncache_cached_manifests//"].Value, check.Equals, float64(1))
- // FooCollection's cached manifest size is 45 ("1f4b0....+45")
- // plus one 51-byte blob signature; session fs counts 3 inodes
- // * 64 bytes.
- c.Check(gauges["arvados_keepweb_sessions_cached_collection_bytes//"].Value, check.Equals, float64(45+51+64*3))
+ c.Check(gauges["arvados_keepweb_sessions_cached_session_bytes//"].Value, check.Equals, float64(1208))
// If the Host header indicates a collection, /metrics.json
// refers to a file in the collection -- the metrics handler
@@ -497,6 +508,22 @@ func (s *IntegrationSuite) TestMetrics(c *check.C) {
c.Assert(err, check.IsNil)
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
+
+ req, _ = http.NewRequest("GET", srvaddr+"/metrics", nil)
+ req.Host = cluster.Services.WebDAVDownload.ExternalURL.Host
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+ resp, err = http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ allmetrics, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+
+ c.Check(string(allmetrics), check.Matches, `(?ms).*\narvados_keepweb_download_apparent_backend_speed_bucket{size_range="0",le="\+Inf"} 4\n.*`)
+ c.Check(string(allmetrics), check.Matches, `(?ms).*\narvados_keepweb_download_speed_bucket{size_range="0",le="\+Inf"} 4\n.*`)
+ c.Check(string(allmetrics), check.Matches, `(?ms).*\narvados_keepweb_upload_speed_bucket{size_range="0",le="\+Inf"} 2\n.*`)
+ c.Check(string(allmetrics), check.Matches, `(?ms).*\narvados_keepweb_upload_sync_delay_seconds_bucket{size_range="0",le="10"} 2\n.*`)
+
+ c.Logf("%s", allmetrics)
}
func (s *IntegrationSuite) SetUpSuite(c *check.C) {
@@ -529,7 +556,7 @@ func (s *IntegrationSuite) SetUpTest(c *check.C) {
ctx := ctxlog.Context(context.Background(), logger)
- s.handler = newHandlerOrErrorHandler(ctx, cluster, cluster.SystemRootToken, nil).(*handler)
+ s.handler = newHandlerOrErrorHandler(ctx, cluster, cluster.SystemRootToken, prometheus.NewRegistry()).(*handler)
s.testServer = httptest.NewUnstartedServer(
httpserver.AddRequestIDs(
httpserver.LogRequests(
diff --git a/services/keep-web/webdav.go b/services/keep-web/webdav.go
deleted file mode 100644
index 501c355a73..0000000000
--- a/services/keep-web/webdav.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepweb
-
-import (
- "crypto/rand"
- "errors"
- "fmt"
- "io"
- prand "math/rand"
- "os"
- "path"
- "strings"
- "sync/atomic"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
-
- "golang.org/x/net/context"
- "golang.org/x/net/webdav"
-)
-
-var (
- lockPrefix string = uuid()
- nextLockSuffix int64 = prand.Int63()
- errReadOnly = errors.New("read-only filesystem")
-)
-
-// webdavFS implements a webdav.FileSystem by wrapping an
-// arvados.CollectionFilesystem.
-//
-// Collections don't preserve empty directories, so Mkdir is
-// effectively a no-op, and we need to make parent dirs spring into
-// existence automatically so sequences like "mkcol foo; put foo/bar"
-// work as expected.
-type webdavFS struct {
- collfs arvados.FileSystem
- writing bool
- // webdav PROPFIND reads the first few bytes of each file
- // whose filename extension isn't recognized, which is
- // prohibitively expensive: we end up fetching multiple 64MiB
- // blocks. Avoid this by returning EOF on all reads when
- // handling a PROPFIND.
- alwaysReadEOF bool
-}
-
-func (fs *webdavFS) makeparents(name string) {
- if !fs.writing {
- return
- }
- dir, _ := path.Split(name)
- if dir == "" || dir == "/" {
- return
- }
- dir = dir[:len(dir)-1]
- fs.makeparents(dir)
- fs.collfs.Mkdir(dir, 0755)
-}
-
-func (fs *webdavFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
- if !fs.writing {
- return errReadOnly
- }
- name = strings.TrimRight(name, "/")
- fs.makeparents(name)
- return fs.collfs.Mkdir(name, 0755)
-}
-
-func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (f webdav.File, err error) {
- writing := flag&(os.O_WRONLY|os.O_RDWR|os.O_TRUNC) != 0
- if writing {
- fs.makeparents(name)
- }
- f, err = fs.collfs.OpenFile(name, flag, perm)
- if !fs.writing {
- // webdav module returns 404 on all OpenFile errors,
- // but returns 405 Method Not Allowed if OpenFile()
- // succeeds but Write() or Close() fails. We'd rather
- // have 405. writeFailer ensures Close() fails if the
- // file is opened for writing *or* Write() is called.
- var err error
- if writing {
- err = errReadOnly
- }
- f = writeFailer{File: f, err: err}
- }
- if fs.alwaysReadEOF {
- f = readEOF{File: f}
- }
- return
-}
-
-func (fs *webdavFS) RemoveAll(ctx context.Context, name string) error {
- return fs.collfs.RemoveAll(name)
-}
-
-func (fs *webdavFS) Rename(ctx context.Context, oldName, newName string) error {
- if !fs.writing {
- return errReadOnly
- }
- if strings.HasSuffix(oldName, "/") {
- // WebDAV "MOVE foo/ bar/" means rename foo to bar.
- oldName = oldName[:len(oldName)-1]
- newName = strings.TrimSuffix(newName, "/")
- }
- fs.makeparents(newName)
- return fs.collfs.Rename(oldName, newName)
-}
-
-func (fs *webdavFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
- if fs.writing {
- fs.makeparents(name)
- }
- return fs.collfs.Stat(name)
-}
-
-type writeFailer struct {
- webdav.File
- err error
-}
-
-func (wf writeFailer) Write([]byte) (int, error) {
- wf.err = errReadOnly
- return 0, wf.err
-}
-
-func (wf writeFailer) Close() error {
- err := wf.File.Close()
- if err != nil {
- wf.err = err
- }
- return wf.err
-}
-
-type readEOF struct {
- webdav.File
-}
-
-func (readEOF) Read(p []byte) (int, error) {
- return 0, io.EOF
-}
-
-// noLockSystem implements webdav.LockSystem by returning success for
-// every possible locking operation, even though it has no side
-// effects such as actually locking anything. This works for a
-// read-only webdav filesystem because webdav locks only apply to
-// writes.
-//
-// This is more suitable than webdav.NewMemLS() for two reasons:
-// First, it allows keep-web to use one locker for all collections
-// even though coll1.vhost/foo and coll2.vhost/foo have the same path
-// but represent different resources. Additionally, it returns valid
-// tokens (rfc2518 specifies that tokens are represented as URIs and
-// are unique across all resources for all time), which might improve
-// client compatibility.
-//
-// However, it does also permit impossible operations, like acquiring
-// conflicting locks and releasing non-existent locks. This might
-// confuse some clients if they try to probe for correctness.
-//
-// Currently this is a moot point: the LOCK and UNLOCK methods are not
-// accepted by keep-web, so it suffices to implement the
-// webdav.LockSystem interface.
-type noLockSystem struct{}
-
-func (*noLockSystem) Confirm(time.Time, string, string, ...webdav.Condition) (func(), error) {
- return noop, nil
-}
-
-func (*noLockSystem) Create(now time.Time, details webdav.LockDetails) (token string, err error) {
- return fmt.Sprintf("opaquelocktoken:%s-%x", lockPrefix, atomic.AddInt64(&nextLockSuffix, 1)), nil
-}
-
-func (*noLockSystem) Refresh(now time.Time, token string, duration time.Duration) (webdav.LockDetails, error) {
- return webdav.LockDetails{}, nil
-}
-
-func (*noLockSystem) Unlock(now time.Time, token string) error {
- return nil
-}
-
-func noop() {}
-
-// Return a version 1 variant 4 UUID, meaning all bits are random
-// except the ones indicating the version and variant.
-func uuid() string {
- var data [16]byte
- if _, err := rand.Read(data[:]); err != nil {
- panic(err)
- }
- // variant 1: N=10xx
- data[8] = data[8]&0x3f | 0x80
- // version 4: M=0100
- data[6] = data[6]&0x0f | 0x40
- return fmt.Sprintf("%x-%x-%x-%x-%x", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])
-}
diff --git a/services/keep-web/webdav_test.go b/services/keep-web/webdav_test.go
deleted file mode 100644
index a450906d5f..0000000000
--- a/services/keep-web/webdav_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepweb
-
-import "golang.org/x/net/webdav"
-
-var _ webdav.FileSystem = &webdavFS{}
diff --git a/services/keep-web/writebuffer.go b/services/keep-web/writebuffer.go
new file mode 100644
index 0000000000..90bdcb476b
--- /dev/null
+++ b/services/keep-web/writebuffer.go
@@ -0,0 +1,161 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepweb
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync/atomic"
+)
+
+// writeBuffer uses a ring buffer to implement an asynchronous write
+// buffer.
+//
+// rpos==wpos means the buffer is empty.
+//
+// rpos==(wpos+1)%size means the buffer is full.
+//
+// size<2 means the buffer is always empty and full, so in this case
+// writeBuffer writes through synchronously.
+type writeBuffer struct {
+ out io.Writer
+ buf []byte
+ writesize int // max bytes flush() should write in a single out.Write()
+ wpos atomic.Int64 // index in buf where writer (Write()) will write to next
+ wsignal chan struct{} // receives a value after wpos or closed changes
+ rpos atomic.Int64 // index in buf where reader (flush()) will read from next
+ rsignal chan struct{} // receives a value after rpos or err changes
+ err error // error encountered by flush
+ closed atomic.Bool
+ flushed chan struct{} // closes when flush() is finished
+}
+
+func newWriteBuffer(w io.Writer, size int) *writeBuffer {
+ wb := &writeBuffer{
+ out: w,
+ buf: make([]byte, size),
+ writesize: (size + 63) / 64,
+ wsignal: make(chan struct{}, 1),
+ rsignal: make(chan struct{}, 1),
+ flushed: make(chan struct{}),
+ }
+ go wb.flush()
+ return wb
+}
+
+func (wb *writeBuffer) Close() error {
+ if wb.closed.Load() {
+ return errors.New("writeBuffer: already closed")
+ }
+ wb.closed.Store(true)
+ // wake up flush()
+ select {
+ case wb.wsignal <- struct{}{}:
+ default:
+ }
+ // wait for flush() to finish
+ <-wb.flushed
+ return wb.err
+}
+
+func (wb *writeBuffer) Write(p []byte) (int, error) {
+ if len(wb.buf) < 2 {
+ // Our buffer logic doesn't work with size<2, and such
+ // a tiny buffer has no purpose anyway, so just write
+ // through unbuffered.
+ return wb.out.Write(p)
+ }
+ todo := p
+ wpos := int(wb.wpos.Load())
+ rpos := int(wb.rpos.Load())
+ for len(todo) > 0 {
+ // wait until the buffer is not full.
+ for rpos == (wpos+1)%len(wb.buf) {
+ select {
+ case <-wb.flushed:
+ if wb.err == nil {
+ return 0, errors.New("Write called on closed writeBuffer")
+ }
+ return 0, wb.err
+ case <-wb.rsignal:
+ rpos = int(wb.rpos.Load())
+ }
+ }
+ // determine next contiguous portion of buffer that is
+ // available.
+ var avail []byte
+ if rpos == 0 {
+ avail = wb.buf[wpos : len(wb.buf)-1]
+ } else if wpos >= rpos {
+ avail = wb.buf[wpos:]
+ } else {
+ avail = wb.buf[wpos : rpos-1]
+ }
+ n := copy(avail, todo)
+ wpos = (wpos + n) % len(wb.buf)
+ wb.wpos.Store(int64(wpos))
+ // wake up flush()
+ select {
+ case wb.wsignal <- struct{}{}:
+ default:
+ }
+ todo = todo[n:]
+ }
+ return len(p), nil
+}
+
+func (wb *writeBuffer) flush() {
+ defer close(wb.flushed)
+ rpos := 0
+ wpos := 0
+ closed := false
+ for {
+ // wait until buffer is not empty.
+ for rpos == wpos {
+ if closed {
+ return
+ }
+ <-wb.wsignal
+ closed = wb.closed.Load()
+ wpos = int(wb.wpos.Load())
+ }
+ // determine next contiguous portion of buffer that is
+ // ready to write through.
+ var ready []byte
+ if rpos < wpos {
+ ready = wb.buf[rpos:wpos]
+ } else {
+ ready = wb.buf[rpos:]
+ }
+ if len(ready) > wb.writesize {
+ ready = ready[:wb.writesize]
+ }
+ _, wb.err = wb.out.Write(ready)
+ if wb.err != nil {
+ return
+ }
+ rpos = (rpos + len(ready)) % len(wb.buf)
+ wb.rpos.Store(int64(rpos))
+ select {
+ case wb.rsignal <- struct{}{}:
+ default:
+ }
+ }
+}
+
+// responseWriter enables inserting an io.Writer-wrapper (like
+// *writeBuffer) into an http.ResponseWriter stack.
+//
+// It passes Write() calls to an io.Writer, and all other calls to an
+// http.ResponseWriter.
+type responseWriter struct {
+ io.Writer
+ http.ResponseWriter
+}
+
+func (rwc responseWriter) Write(p []byte) (int, error) {
+ return rwc.Writer.Write(p)
+}
diff --git a/services/keep-web/writebuffer_test.go b/services/keep-web/writebuffer_test.go
new file mode 100644
index 0000000000..589dc241a2
--- /dev/null
+++ b/services/keep-web/writebuffer_test.go
@@ -0,0 +1,98 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepweb
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "time"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&writeBufferSuite{})
+
+type writeBufferSuite struct {
+}
+
+// 1000 / 96.3 ns/op = 10.384 GB/s
+func (s *writeBufferSuite) Benchmark_1KBWrites(c *C) {
+ wb := newWriteBuffer(io.Discard, 1<<20)
+ in := make([]byte, 1000)
+ for i := 0; i < c.N; i++ {
+ wb.Write(in)
+ }
+ wb.Close()
+}
+
+func (s *writeBufferSuite) TestRandomizedSpeedsAndSizes(c *C) {
+ for i := 0; i < 20; i++ {
+ insize := rand.Intn(1 << 26)
+ bufsize := rand.Intn(1 << 26)
+ if i < 2 {
+ // make sure to test edge cases
+ bufsize = i
+ } else if bufsize > 0 && insize/bufsize > 1000 {
+ // don't waste too much time testing tiny
+ // buffer / huge content
+ insize = bufsize*1000 + 123
+ }
+ c.Logf("%s: insize %d bufsize %d", c.TestName(), insize, bufsize)
+
+ in := make([]byte, insize)
+ b := byte(0)
+ for i := range in {
+ in[i] = b
+ b++
+ }
+
+ out := &bytes.Buffer{}
+ done := make(chan struct{})
+ pr, pw := io.Pipe()
+ go func() {
+ n, err := slowCopy(out, pr, rand.Intn(8192)+1)
+ c.Check(err, IsNil)
+ c.Check(n, Equals, int64(insize))
+ close(done)
+ }()
+ wb := newWriteBuffer(pw, bufsize)
+ n, err := slowCopy(wb, bytes.NewBuffer(in), rand.Intn(8192)+1)
+ c.Check(err, IsNil)
+ c.Check(n, Equals, int64(insize))
+ c.Check(wb.Close(), IsNil)
+ c.Check(pw.Close(), IsNil)
+ <-done
+ c.Check(out.Len(), Equals, insize)
+ for i := 0; i < out.Len() && i < len(in); i++ {
+ if out.Bytes()[i] != in[i] {
+ c.Errorf("content mismatch at byte %d", i)
+ break
+ }
+ }
+ }
+}
+
+func slowCopy(dst io.Writer, src io.Reader, bufsize int) (int64, error) {
+ wrote := int64(0)
+ buf := make([]byte, bufsize)
+ for {
+ time.Sleep(time.Duration(rand.Intn(100) + 1))
+ n, err := src.Read(buf)
+ if n > 0 {
+ n, err := dst.Write(buf[:n])
+ wrote += int64(n)
+ if err != nil {
+ return wrote, err
+ }
+ }
+ if err == io.EOF {
+ return wrote, nil
+ }
+ if err != nil {
+ return wrote, err
+ }
+ }
+}
diff --git a/services/keepproxy/keepproxy.go b/services/keepproxy/keepproxy.go
index f857ed3e4e..39ffd45cbe 100644
--- a/services/keepproxy/keepproxy.go
+++ b/services/keepproxy/keepproxy.go
@@ -175,13 +175,18 @@ func (h *proxyHandler) checkAuthorizationHeader(req *http.Request) (pass bool, t
return true, tok, user
}
-// We need to make a private copy of the default http transport early
-// in initialization, then make copies of our private copy later. It
-// won't be safe to copy http.DefaultTransport itself later, because
-// its private mutexes might have already been used. (Without this,
-// the test suite sometimes panics "concurrent map writes" in
-// net/http.(*Transport).removeIdleConnLocked().)
-var defaultTransport = *(http.DefaultTransport.(*http.Transport))
+// We can't copy the default http transport because http.Transport has
+// a mutex field, so we make our own using the values of the exported
+// fields.
+var defaultTransport = http.Transport{
+ Proxy: http.DefaultTransport.(*http.Transport).Proxy,
+ DialContext: http.DefaultTransport.(*http.Transport).DialContext,
+ ForceAttemptHTTP2: http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,
+ MaxIdleConns: http.DefaultTransport.(*http.Transport).MaxIdleConns,
+ IdleConnTimeout: http.DefaultTransport.(*http.Transport).IdleConnTimeout,
+ TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout,
+ ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,
+}
type proxyHandler struct {
http.Handler
@@ -195,14 +200,23 @@ type proxyHandler struct {
func newHandler(ctx context.Context, kc *keepclient.KeepClient, timeout time.Duration, cluster *arvados.Cluster) (service.Handler, error) {
rest := mux.NewRouter()
- transport := defaultTransport
- transport.DialContext = (&net.Dialer{
- Timeout: keepclient.DefaultConnectTimeout,
- KeepAlive: keepclient.DefaultKeepAlive,
- DualStack: true,
- }).DialContext
- transport.TLSClientConfig = arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure)
- transport.TLSHandshakeTimeout = keepclient.DefaultTLSHandshakeTimeout
+ // We can't copy the default http transport because
+ // http.Transport has a mutex field, so we copy the fields
+ // that we know have non-zero values in http.DefaultTransport.
+ transport := &http.Transport{
+ Proxy: http.DefaultTransport.(*http.Transport).Proxy,
+ ForceAttemptHTTP2: http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,
+ MaxIdleConns: http.DefaultTransport.(*http.Transport).MaxIdleConns,
+ IdleConnTimeout: http.DefaultTransport.(*http.Transport).IdleConnTimeout,
+ ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,
+ DialContext: (&net.Dialer{
+ Timeout: keepclient.DefaultConnectTimeout,
+ KeepAlive: keepclient.DefaultKeepAlive,
+ DualStack: true,
+ }).DialContext,
+ TLSClientConfig: arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure),
+ TLSHandshakeTimeout: keepclient.DefaultTLSHandshakeTimeout,
+ }
cacheQ, err := lru.New2Q(500)
if err != nil {
@@ -213,7 +227,7 @@ func newHandler(ctx context.Context, kc *keepclient.KeepClient, timeout time.Dur
Handler: rest,
KeepClient: kc,
timeout: timeout,
- transport: &transport,
+ transport: transport,
apiTokenCache: &apiTokenCache{
tokens: cacheQ,
expireTime: 300,
@@ -290,7 +304,6 @@ func (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {
var err error
var status int
var expectLength, responseLength int64
- var proxiedURI = "-"
logger := ctxlog.FromContext(req.Context())
defer func() {
@@ -298,7 +311,6 @@ func (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {
"locator": locator,
"expectLength": expectLength,
"responseLength": responseLength,
- "proxiedURI": proxiedURI,
"err": err,
})
if status != http.StatusOK {
@@ -307,6 +319,7 @@ func (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {
}()
kc := h.makeKeepClient(req)
+ kc.DiskCacheSize = keepclient.DiskCacheDisabled
var pass bool
var tok string
@@ -331,9 +344,9 @@ func (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {
switch req.Method {
case "HEAD":
- expectLength, proxiedURI, err = kc.Ask(locator)
+ expectLength, _, err = kc.Ask(locator)
case "GET":
- reader, expectLength, proxiedURI, err = kc.Get(locator)
+ reader, expectLength, _, err = kc.Get(locator)
if reader != nil {
defer reader.Close()
}
@@ -509,9 +522,9 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
// ServeHTTP implementation for IndexHandler
// Supports only GET requests for /index/{prefix:[0-9a-f]{0,32}}
// For each keep server found in LocalRoots:
-// Invokes GetIndex using keepclient
-// Expects "complete" response (terminating with blank new line)
-// Aborts on any errors
+// - Invokes GetIndex using keepclient
+// - Expects "complete" response (terminating with blank new line)
+// - Aborts on any errors
// Concatenates responses from all those keep servers and returns
func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
setCORSHeaders(resp)
@@ -566,7 +579,7 @@ func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
}
func (h *proxyHandler) makeKeepClient(req *http.Request) *keepclient.KeepClient {
- kc := *h.KeepClient
+ kc := h.KeepClient.Clone()
kc.RequestID = req.Header.Get("X-Request-Id")
kc.HTTPClient = &proxyClient{
client: &http.Client{
@@ -575,5 +588,5 @@ func (h *proxyHandler) makeKeepClient(req *http.Request) *keepclient.KeepClient
},
proto: req.Proto,
}
- return &kc
+ return kc
}
diff --git a/services/keepproxy/keepproxy_test.go b/services/keepproxy/keepproxy_test.go
index 8242f5b2b5..2c73e2d104 100644
--- a/services/keepproxy/keepproxy_test.go
+++ b/services/keepproxy/keepproxy_test.go
@@ -32,8 +32,8 @@ import (
. "gopkg.in/check.v1"
)
-// Gocheck boilerplate
func Test(t *testing.T) {
+ keepclient.DefaultRetryDelay = time.Millisecond
TestingT(t)
}
@@ -142,11 +142,11 @@ func runProxy(c *C, bogusClientToken bool, loadKeepstoresFromConfig bool, kp *ar
arv.ApiToken = "bogus-token"
}
kc := keepclient.New(arv)
+ kc.DiskCacheSize = keepclient.DiskCacheDisabled
sr := map[string]string{
TestProxyUUID: "http://" + srv.Addr,
}
kc.SetServiceRoots(sr, sr, sr)
- kc.Arvados.External = true
return srv, kc, logbuf
}
@@ -346,7 +346,7 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
}
{
- reader, _, _, err := kc.Get(hash)
+ reader, _, _, err := kc.Get(hash + "+3")
c.Check(reader, Equals, nil)
c.Check(err, Equals, keepclient.BlockNotFound)
c.Log("Finished Get (expected BlockNotFound)")
@@ -407,7 +407,7 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
{
reader, blocklen, _, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e")
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
all, err := ioutil.ReadAll(reader)
c.Check(err, IsNil)
c.Check(all, DeepEquals, []byte(""))
@@ -608,22 +608,22 @@ func (s *ServerRequiredSuite) TestStripHint(c *C) {
}
// Test GetIndex
-// Put one block, with 2 replicas
-// With no prefix (expect the block locator, twice)
-// With an existing prefix (expect the block locator, twice)
-// With a valid but non-existing prefix (expect "\n")
-// With an invalid prefix (expect error)
+// - Put one block, with 2 replicas
+// - With no prefix (expect the block locator, twice)
+// - With an existing prefix (expect the block locator, twice)
+// - With a valid but non-existing prefix (expect "\n")
+// - With an invalid prefix (expect error)
func (s *ServerRequiredSuite) TestGetIndex(c *C) {
getIndexWorker(c, false)
}
// Test GetIndex
-// Uses config.yml
-// Put one block, with 2 replicas
-// With no prefix (expect the block locator, twice)
-// With an existing prefix (expect the block locator, twice)
-// With a valid but non-existing prefix (expect "\n")
-// With an invalid prefix (expect error)
+// - Uses config.yml
+// - Put one block, with 2 replicas
+// - With no prefix (expect the block locator, twice)
+// - With an existing prefix (expect the block locator, twice)
+// - With a valid but non-existing prefix (expect "\n")
+// - With an invalid prefix (expect error)
func (s *ServerRequiredConfigYmlSuite) TestGetIndex(c *C) {
getIndexWorker(c, true)
}
@@ -641,7 +641,7 @@ func getIndexWorker(c *C, useConfig bool) {
c.Check(rep, Equals, 2)
c.Check(err, Equals, nil)
- reader, blocklen, _, err := kc.Get(hash)
+ reader, blocklen, _, err := kc.Get(hash2)
c.Assert(err, IsNil)
c.Check(blocklen, Equals, int64(10))
all, err := ioutil.ReadAll(reader)
@@ -696,7 +696,7 @@ func (s *ServerRequiredSuite) TestCollectionSharingToken(c *C) {
defer srv.Close()
hash, _, err := kc.PutB([]byte("shareddata"))
c.Check(err, IsNil)
- kc.Arvados.ApiToken = arvadostest.FooCollectionSharingToken
+ kc.Arvados.ApiToken = arvadostest.FooFileCollectionSharingToken
rdr, _, _, err := kc.Get(hash)
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(rdr)
@@ -783,10 +783,12 @@ func (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {
},
} {
err := f()
- c.Assert(err, NotNil)
+ c.Check(err, NotNil)
errNotFound, _ := err.(*keepclient.ErrNotFound)
- c.Check(errNotFound.Temporary(), Equals, true)
- c.Check(err, ErrorMatches, `.*HTTP 502.*`)
+ if c.Check(errNotFound, NotNil) {
+ c.Check(errNotFound.Temporary(), Equals, true)
+ c.Check(err, ErrorMatches, `.*HTTP 502.*`)
+ }
}
}
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index f9b383e70e..2c8a79350c 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -5,13 +5,11 @@
package keepstore
import (
- "bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
"os"
"regexp"
@@ -32,17 +30,18 @@ func init() {
driver["Azure"] = newAzureBlobVolume
}
-func newAzureBlobVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &AzureBlobVolume{
+func newAzureBlobVolume(params newVolumeParams) (volume, error) {
+ v := &azureBlobVolume{
RequestTimeout: azureDefaultRequestTimeout,
WriteRaceInterval: azureDefaultWriteRaceInterval,
WriteRacePollTime: azureDefaultWriteRacePollTime,
- cluster: cluster,
- volume: volume,
- logger: logger,
- metrics: metrics,
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
+ logger: params.Logger,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
}
- err := json.Unmarshal(volume.DriverParameters, &v)
+ err := json.Unmarshal(params.ConfigVolume.DriverParameters, &v)
if err != nil {
return nil, err
}
@@ -80,8 +79,8 @@ func newAzureBlobVolume(cluster *arvados.Cluster, volume arvados.Volume, logger
return v, v.check()
}
-func (v *AzureBlobVolume) check() error {
- lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+func (v *azureBlobVolume) check() error {
+ lbls := prometheus.Labels{"device_id": v.DeviceID()}
v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
return nil
}
@@ -94,9 +93,9 @@ const (
azureDefaultWriteRacePollTime = arvados.Duration(time.Second)
)
-// An AzureBlobVolume stores and retrieves blocks in an Azure Blob
+// An azureBlobVolume stores and retrieves blocks in an Azure Blob
// container.
-type AzureBlobVolume struct {
+type azureBlobVolume struct {
StorageAccountName string
StorageAccountKey string
StorageBaseURL string // "" means default, "core.windows.net"
@@ -108,12 +107,13 @@ type AzureBlobVolume struct {
WriteRaceInterval arvados.Duration
WriteRacePollTime arvados.Duration
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
- azClient storage.Client
- container *azureContainer
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ bufferPool *bufferPool
+ azClient storage.Client
+ container *azureContainer
}
// singleSender is a single-attempt storage.Sender.
@@ -124,18 +124,13 @@ func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Resp
return c.HTTPClient.Do(req)
}
-// Type implements Volume.
-func (v *AzureBlobVolume) Type() string {
- return "Azure"
-}
-
-// GetDeviceID returns a globally unique ID for the storage container.
-func (v *AzureBlobVolume) GetDeviceID() string {
+// DeviceID returns a globally unique ID for the storage container.
+func (v *azureBlobVolume) DeviceID() string {
return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
}
// Return true if expires_at metadata attribute is found on the block
-func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
+func (v *azureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
metadata, err := v.container.GetBlobMetadata(loc)
if err != nil {
return false, metadata, v.translateError(err)
@@ -146,30 +141,34 @@ func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, err
return false, metadata, nil
}
-// Get reads a Keep block that has been stored as a block blob in the
-// container.
+// BlockRead reads a Keep block that has been stored as a block blob
+// in the container.
//
// If the block is younger than azureWriteRaceInterval and is
-// unexpectedly empty, assume a PutBlob operation is in progress, and
-// wait for it to finish writing.
-func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- trashed, _, err := v.checkTrashed(loc)
+// unexpectedly empty, assume a BlockWrite operation is in progress,
+// and wait for it to finish writing.
+func (v *azureBlobVolume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+ trashed, _, err := v.checkTrashed(hash)
if err != nil {
- return 0, err
+ return err
}
if trashed {
- return 0, os.ErrNotExist
+ return os.ErrNotExist
}
+ buf, err := v.bufferPool.GetContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer v.bufferPool.Put(buf)
var deadline time.Time
- haveDeadline := false
- size, err := v.get(ctx, loc, buf)
- for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
+ wrote, err := v.get(ctx, hash, w)
+ for err == nil && wrote == 0 && hash != "d41d8cd98f00b204e9800998ecf8427e" {
// Seeing a brand new empty block probably means we're
// in a race with CreateBlob, which under the hood
// (apparently) does "CreateEmpty" and "CommitData"
// with no additional transaction locking.
- if !haveDeadline {
- t, err := v.Mtime(loc)
+ if deadline.IsZero() {
+ t, err := v.Mtime(hash)
if err != nil {
ctxlog.FromContext(ctx).Print("Got empty block (possible race) but Mtime failed: ", err)
break
@@ -178,25 +177,24 @@ func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int,
if time.Now().After(deadline) {
break
}
- ctxlog.FromContext(ctx).Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
- haveDeadline = true
+ ctxlog.FromContext(ctx).Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", hash, time.Since(t), deadline)
} else if time.Now().After(deadline) {
break
}
select {
case <-ctx.Done():
- return 0, ctx.Err()
+ return ctx.Err()
case <-time.After(v.WriteRacePollTime.Duration()):
}
- size, err = v.get(ctx, loc, buf)
+ wrote, err = v.get(ctx, hash, w)
}
- if haveDeadline {
- ctxlog.FromContext(ctx).Printf("Race ended with size==%d", size)
+ if !deadline.IsZero() {
+ ctxlog.FromContext(ctx).Printf("Race ended with size==%d", wrote)
}
- return size, err
+ return err
}
-func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
+func (v *azureBlobVolume) get(ctx context.Context, hash string, dst io.WriterAt) (int, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -206,28 +204,30 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
}
pieces := 1
- expectSize := len(buf)
+ expectSize := BlockSize
+ sizeKnown := false
if pieceSize < BlockSize {
- // Unfortunately the handler doesn't tell us how long the blob
- // is expected to be, so we have to ask Azure.
- props, err := v.container.GetBlobProperties(loc)
+ // Unfortunately the handler doesn't tell us how long
+ // the blob is expected to be, so we have to ask
+ // Azure.
+ props, err := v.container.GetBlobProperties(hash)
if err != nil {
return 0, v.translateError(err)
}
if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
- return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
+ return 0, fmt.Errorf("block %s invalid size %d (max %d)", hash, props.ContentLength, BlockSize)
}
expectSize = int(props.ContentLength)
pieces = (expectSize + pieceSize - 1) / pieceSize
+ sizeKnown = true
}
if expectSize == 0 {
return 0, nil
}
- // We'll update this actualSize if/when we get the last piece.
- actualSize := -1
errors := make(chan error, pieces)
+ var wrote atomic.Int64
var wg sync.WaitGroup
wg.Add(pieces)
for p := 0; p < pieces; p++ {
@@ -252,9 +252,9 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
go func() {
defer close(gotRdr)
if startPos == 0 && endPos == expectSize {
- rdr, err = v.container.GetBlob(loc)
+ rdr, err = v.container.GetBlob(hash)
} else {
- rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
+ rdr, err = v.container.GetBlobRange(hash, startPos, endPos-1, nil)
}
}()
select {
@@ -282,86 +282,44 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
<-ctx.Done()
rdr.Close()
}()
- n, err := io.ReadFull(rdr, buf[startPos:endPos])
- if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
+ n, err := io.CopyN(io.NewOffsetWriter(dst, int64(startPos)), rdr, int64(endPos-startPos))
+ wrote.Add(n)
+ if pieces == 1 && !sizeKnown && (err == io.ErrUnexpectedEOF || err == io.EOF) {
// If we don't know the actual size,
// and just tried reading 64 MiB, it's
// normal to encounter EOF.
} else if err != nil {
- if ctx.Err() == nil {
- errors <- err
- }
+ errors <- err
cancel()
return
}
- if p == pieces-1 {
- actualSize = startPos + n
- }
}(p)
}
wg.Wait()
close(errors)
if len(errors) > 0 {
- return 0, v.translateError(<-errors)
- }
- if ctx.Err() != nil {
- return 0, ctx.Err()
+ return int(wrote.Load()), v.translateError(<-errors)
}
- return actualSize, nil
+ return int(wrote.Load()), ctx.Err()
}
-// Compare the given data with existing stored data.
-func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
- trashed, _, err := v.checkTrashed(loc)
- if err != nil {
- return err
- }
- if trashed {
- return os.ErrNotExist
- }
- var rdr io.ReadCloser
- gotRdr := make(chan struct{})
- go func() {
- defer close(gotRdr)
- rdr, err = v.container.GetBlob(loc)
- }()
- select {
- case <-ctx.Done():
- go func() {
- <-gotRdr
- if err == nil {
- rdr.Close()
- }
- }()
- return ctx.Err()
- case <-gotRdr:
- }
- if err != nil {
- return v.translateError(err)
- }
- defer rdr.Close()
- return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
-}
-
-// Put stores a Keep block as a block blob in the container.
-func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
+// BlockWrite stores a block on the volume. If it already exists, its
+// timestamp is updated.
+func (v *azureBlobVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
// Send the block data through a pipe, so that (if we need to)
// we can close the pipe early and abandon our
// CreateBlockBlobFromReader() goroutine, without worrying
- // about CreateBlockBlobFromReader() accessing our block
+ // about CreateBlockBlobFromReader() accessing our data
// buffer after we release it.
bufr, bufw := io.Pipe()
go func() {
- io.Copy(bufw, bytes.NewReader(block))
+ bufw.Write(data)
bufw.Close()
}()
- errChan := make(chan error)
+ errChan := make(chan error, 1)
go func() {
var body io.Reader = bufr
- if len(block) == 0 {
+ if len(data) == 0 {
// We must send a "Content-Length: 0" header,
// but the http client interprets
// ContentLength==0 as "unknown" unless it can
@@ -370,18 +328,15 @@ func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) err
body = http.NoBody
bufr.Close()
}
- errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
+ errChan <- v.container.CreateBlockBlobFromReader(hash, len(data), body, nil)
}()
select {
case <-ctx.Done():
ctxlog.FromContext(ctx).Debugf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
- // Our pipe might be stuck in Write(), waiting for
- // io.Copy() to read. If so, un-stick it. This means
- // CreateBlockBlobFromReader will get corrupt data,
- // but that's OK: the size won't match, so the write
- // will fail.
- go io.Copy(ioutil.Discard, bufr)
- // CloseWithError() will return once pending I/O is done.
+ // bufw.CloseWithError() interrupts bufw.Write() if
+ // necessary, ensuring CreateBlockBlobFromReader can't
+ // read any more of our data slice via bufr after we
+ // return.
bufw.CloseWithError(ctx.Err())
ctxlog.FromContext(ctx).Debugf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
return ctx.Err()
@@ -390,12 +345,9 @@ func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) err
}
}
-// Touch updates the last-modified property of a block blob.
-func (v *AzureBlobVolume) Touch(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- trashed, metadata, err := v.checkTrashed(loc)
+// BlockTouch updates the last-modified property of a block blob.
+func (v *azureBlobVolume) BlockTouch(hash string) error {
+ trashed, metadata, err := v.checkTrashed(hash)
if err != nil {
return err
}
@@ -404,12 +356,12 @@ func (v *AzureBlobVolume) Touch(loc string) error {
}
metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
- return v.container.SetBlobMetadata(loc, metadata, nil)
+ return v.container.SetBlobMetadata(hash, metadata, nil)
}
// Mtime returns the last-modified property of a block blob.
-func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
- trashed, _, err := v.checkTrashed(loc)
+func (v *azureBlobVolume) Mtime(hash string) (time.Time, error) {
+ trashed, _, err := v.checkTrashed(hash)
if err != nil {
return time.Time{}, err
}
@@ -417,21 +369,25 @@ func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
return time.Time{}, os.ErrNotExist
}
- props, err := v.container.GetBlobProperties(loc)
+ props, err := v.container.GetBlobProperties(hash)
if err != nil {
return time.Time{}, err
}
return time.Time(props.LastModified), nil
}
-// IndexTo writes a list of Keep blocks that are stored in the
+// Index writes a list of Keep blocks that are stored in the
// container.
-func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
+func (v *azureBlobVolume) Index(ctx context.Context, prefix string, writer io.Writer) error {
params := storage.ListBlobsParameters{
Prefix: prefix,
Include: &storage.IncludeBlobDataset{Metadata: true},
}
for page := 1; ; page++ {
+ err := ctx.Err()
+ if err != nil {
+ return err
+ }
resp, err := v.listBlobs(page, params)
if err != nil {
return err
@@ -463,11 +419,11 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
}
// call v.container.ListBlobs, retrying if needed.
-func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
+func (v *azureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
for i := 0; i < v.ListBlobsMaxAttempts; i++ {
resp, err = v.container.ListBlobs(params)
err = v.translateError(err)
- if err == VolumeBusyError {
+ if err == errVolumeUnavailable {
v.logger.Printf("ListBlobs: will retry page %d in %s after error: %s", page, v.ListBlobsRetryDelay, err)
time.Sleep(time.Duration(v.ListBlobsRetryDelay))
continue
@@ -479,11 +435,7 @@ func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters
}
// Trash a Keep block.
-func (v *AzureBlobVolume) Trash(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
-
+func (v *azureBlobVolume) BlockTrash(loc string) error {
// Ideally we would use If-Unmodified-Since, but that
// particular condition seems to be ignored by Azure. Instead,
// we get the Etag before checking Mtime, and use If-Match to
@@ -514,11 +466,11 @@ func (v *AzureBlobVolume) Trash(loc string) error {
})
}
-// Untrash a Keep block.
-// Delete the expires_at metadata attribute
-func (v *AzureBlobVolume) Untrash(loc string) error {
+// BlockUntrash deletes the expires_at metadata attribute for the
+// specified block blob.
+func (v *azureBlobVolume) BlockUntrash(hash string) error {
// if expires_at does not exist, return NotFoundError
- metadata, err := v.container.GetBlobMetadata(loc)
+ metadata, err := v.container.GetBlobMetadata(hash)
if err != nil {
return v.translateError(err)
}
@@ -528,33 +480,19 @@ func (v *AzureBlobVolume) Untrash(loc string) error {
// reset expires_at metadata attribute
metadata["expires_at"] = ""
- err = v.container.SetBlobMetadata(loc, metadata, nil)
+ err = v.container.SetBlobMetadata(hash, metadata, nil)
return v.translateError(err)
}
-// Status returns a VolumeStatus struct with placeholder data.
-func (v *AzureBlobVolume) Status() *VolumeStatus {
- return &VolumeStatus{
- DeviceNum: 1,
- BytesFree: BlockSize * 1000,
- BytesUsed: 1,
- }
-}
-
-// String returns a volume label, including the container name.
-func (v *AzureBlobVolume) String() string {
- return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
-}
-
// If possible, translate an Azure SDK error to a recognizable error
// like os.ErrNotExist.
-func (v *AzureBlobVolume) translateError(err error) error {
+func (v *azureBlobVolume) translateError(err error) error {
switch {
case err == nil:
return err
case strings.Contains(err.Error(), "StatusCode=503"):
// "storage: service returned error: StatusCode=503, ErrorCode=ServerBusy, ErrorMessage=The server is busy" (See #14804)
- return VolumeBusyError
+ return errVolumeUnavailable
case strings.Contains(err.Error(), "Not Found"):
// "storage: service returned without a response body (404 Not Found)"
return os.ErrNotExist
@@ -568,17 +506,13 @@ func (v *AzureBlobVolume) translateError(err error) error {
var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
-func (v *AzureBlobVolume) isKeepBlock(s string) bool {
+func (v *azureBlobVolume) isKeepBlock(s string) bool {
return keepBlockRegexp.MatchString(s)
}
// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
-func (v *AzureBlobVolume) EmptyTrash() {
- if v.cluster.Collections.BlobDeleteConcurrency < 1 {
- return
- }
-
+func (v *azureBlobVolume) EmptyTrash() {
var bytesDeleted, bytesInTrash int64
var blocksDeleted, blocksInTrash int64
@@ -642,11 +576,11 @@ func (v *AzureBlobVolume) EmptyTrash() {
close(todo)
wg.Wait()
- v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
// InternalStats returns bucket I/O and API call counters.
-func (v *AzureBlobVolume) InternalStats() interface{} {
+func (v *azureBlobVolume) InternalStats() interface{} {
return &v.container.stats
}
@@ -713,7 +647,7 @@ func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
b := c.ctr.GetBlobReference(bname)
rdr, err := b.Get(nil)
c.stats.TickErr(err)
- return NewCountingReader(rdr, c.stats.TickInBytes), err
+ return newCountingReader(rdr, c.stats.TickInBytes), err
}
func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
@@ -728,7 +662,7 @@ func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storag
GetBlobOptions: opts,
})
c.stats.TickErr(err)
- return NewCountingReader(rdr, c.stats.TickInBytes), err
+ return newCountingReader(rdr, c.stats.TickInBytes), err
}
// If we give it an io.Reader that doesn't also have a Len() int
@@ -749,7 +683,7 @@ func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr i
c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
if size != 0 {
rdr = &readerWithAzureLen{
- Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
+ Reader: newCountingReader(rdr, c.stats.TickOutBytes),
len: size,
}
}
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index 48d58ee9bf..b8acd980a1 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -87,7 +87,7 @@ func (h *azStubHandler) TouchWithDate(container, hash string, t time.Time) {
blob.Mtime = t
}
-func (h *azStubHandler) PutRaw(container, hash string, data []byte) {
+func (h *azStubHandler) BlockWriteRaw(container, hash string, data []byte) {
h.Lock()
defer h.Unlock()
h.blobs[container+"|"+hash] = &azBlob{
@@ -221,7 +221,7 @@ func (h *azStubHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(http.StatusCreated)
case r.Method == "PUT" && r.Form.Get("comp") == "metadata":
// "Set Metadata Headers" API. We don't bother
- // stubbing "Get Metadata Headers": AzureBlobVolume
+ // stubbing "Get Metadata Headers": azureBlobVolume
// sets metadata headers only as a way to bump Etag
// and Last-Modified.
if !blobExists {
@@ -365,14 +365,14 @@ func (d *azStubDialer) Dial(network, address string) (net.Conn, error) {
return d.Dialer.Dial(network, address)
}
-type TestableAzureBlobVolume struct {
- *AzureBlobVolume
+type testableAzureBlobVolume struct {
+ *azureBlobVolume
azHandler *azStubHandler
azStub *httptest.Server
t TB
}
-func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs) *TestableAzureBlobVolume {
+func (s *stubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolumeParams) *testableAzureBlobVolume {
azHandler := newAzStubHandler(t.(*check.C))
azStub := httptest.NewServer(azHandler)
@@ -396,7 +396,7 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvado
azClient.Sender = &singleSender{}
bs := azClient.GetBlobService()
- v := &AzureBlobVolume{
+ v := &azureBlobVolume{
ContainerName: container,
WriteRaceInterval: arvados.Duration(time.Millisecond),
WriteRacePollTime: arvados.Duration(time.Nanosecond),
@@ -404,65 +404,72 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvado
ListBlobsRetryDelay: arvados.Duration(time.Millisecond),
azClient: azClient,
container: &azureContainer{ctr: bs.GetContainerReference(container)},
- cluster: cluster,
- volume: volume,
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
logger: ctxlog.TestLogger(t),
- metrics: metrics,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
}
if err = v.check(); err != nil {
t.Fatal(err)
}
- return &TestableAzureBlobVolume{
- AzureBlobVolume: v,
+ return &testableAzureBlobVolume{
+ azureBlobVolume: v,
azHandler: azHandler,
azStub: azStub,
t: t,
}
}
-var _ = check.Suite(&StubbedAzureBlobSuite{})
+var _ = check.Suite(&stubbedAzureBlobSuite{})
-type StubbedAzureBlobSuite struct {
+type stubbedAzureBlobSuite struct {
origHTTPTransport http.RoundTripper
}
-func (s *StubbedAzureBlobSuite) SetUpTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) SetUpSuite(c *check.C) {
s.origHTTPTransport = http.DefaultTransport
http.DefaultTransport = &http.Transport{
Dial: (&azStubDialer{logger: ctxlog.TestLogger(c)}).Dial,
}
}
-func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) TearDownSuite(c *check.C) {
http.DefaultTransport = s.origHTTPTransport
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableAzureBlobVolume(t, params)
})
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
// Test (BlockSize mod azureMaxGetBytes)==0 and !=0 cases
- for _, b := range []int{2 << 22, 2<<22 - 1} {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- v := s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+	for _, b := range []int{2 << 22, 2<<22 - 1} {
+ c.Logf("=== MaxGetBytes=%d", b)
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+ v := s.newTestableAzureBlobVolume(t, params)
v.MaxGetBytes = b
return v
})
}
}
-func (s *StubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
- DoGenericVolumeTests(c, false, func(c TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableAzureBlobVolume(c, cluster, volume, metrics)
+func (s *stubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
+ DoGenericVolumeTests(c, false, func(c TB, params newVolumeParams) TestableVolume {
+ return s.newTestableAzureBlobVolume(c, params)
})
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
- v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
+ v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+ Cluster: testCluster(c),
+ ConfigVolume: arvados.Volume{Replication: 3},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ })
defer v.Teardown()
for _, size := range []int{
@@ -478,27 +485,30 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
data[i] = byte((i + 7) & 0xff)
}
hash := fmt.Sprintf("%x", md5.Sum(data))
- err := v.Put(context.Background(), hash, data)
+ err := v.BlockWrite(context.Background(), hash, data)
if err != nil {
c.Error(err)
}
- gotData := make([]byte, len(data))
- gotLen, err := v.Get(context.Background(), hash, gotData)
+ gotData := &brbuffer{}
+ err = v.BlockRead(context.Background(), hash, gotData)
if err != nil {
c.Error(err)
}
- gotHash := fmt.Sprintf("%x", md5.Sum(gotData))
- if gotLen != size {
- c.Errorf("length mismatch: got %d != %d", gotLen, size)
- }
+ gotHash := fmt.Sprintf("%x", md5.Sum(gotData.Bytes()))
+ c.Check(gotData.Len(), check.Equals, size)
if gotHash != hash {
c.Errorf("hash mismatch: got %s != %s", gotHash, hash)
}
}
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
- v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
+ v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+ Cluster: testCluster(c),
+ ConfigVolume: arvados.Volume{Replication: 3},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ })
defer v.Teardown()
var wg sync.WaitGroup
@@ -508,42 +518,46 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
wg.Add(1)
go func() {
defer wg.Done()
- err := v.Put(context.Background(), TestHash, TestBlock)
+ err := v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
c.Error(err)
}
}()
- continuePut := make(chan struct{})
- // Wait for the stub's Put to create the empty blob
- v.azHandler.race <- continuePut
+ continueBlockWrite := make(chan struct{})
+ // Wait for the stub's BlockWrite to create the empty blob
+ v.azHandler.race <- continueBlockWrite
wg.Add(1)
go func() {
defer wg.Done()
- buf := make([]byte, len(TestBlock))
- _, err := v.Get(context.Background(), TestHash, buf)
+ err := v.BlockRead(context.Background(), TestHash, brdiscard)
if err != nil {
c.Error(err)
}
}()
- // Wait for the stub's Get to get the empty blob
+ // Wait for the stub's BlockRead to get the empty blob
close(v.azHandler.race)
- // Allow stub's Put to continue, so the real data is ready
- // when the volume's Get retries
- <-continuePut
- // Wait for Get() and Put() to finish
+ // Allow stub's BlockWrite to continue, so the real data is ready
+ // when the volume's BlockRead retries
+ <-continueBlockWrite
+ // Wait for BlockRead() and BlockWrite() to finish
wg.Wait()
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
- v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
- v.AzureBlobVolume.WriteRaceInterval.Set("2s")
- v.AzureBlobVolume.WriteRacePollTime.Set("5ms")
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
+ v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+ Cluster: testCluster(c),
+ ConfigVolume: arvados.Volume{Replication: 3},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ })
+ v.azureBlobVolume.WriteRaceInterval.Set("2s")
+ v.azureBlobVolume.WriteRacePollTime.Set("5ms")
defer v.Teardown()
- v.PutRaw(TestHash, nil)
+ v.BlockWriteRaw(TestHash, nil)
buf := new(bytes.Buffer)
- v.IndexTo("", buf)
+ v.Index(context.Background(), "", buf)
if buf.Len() != 0 {
c.Errorf("Index %+q should be empty", buf.Bytes())
}
@@ -553,52 +567,47 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *che
allDone := make(chan struct{})
go func() {
defer close(allDone)
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
c.Error(err)
return
}
- if n != 0 {
- c.Errorf("Got %+q, expected empty buf", buf[:n])
- }
+ c.Check(buf.String(), check.Equals, "")
}()
select {
case <-allDone:
case <-time.After(time.Second):
- c.Error("Get should have stopped waiting for race when block was 2s old")
+ c.Error("BlockRead should have stopped waiting for race when block was 2s old")
}
buf.Reset()
- v.IndexTo("", buf)
+ v.Index(context.Background(), "", buf)
if !bytes.HasPrefix(buf.Bytes(), []byte(TestHash+"+0")) {
c.Errorf("Index %+q should have %+q", buf.Bytes(), TestHash+"+0")
}
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelGet(c *check.C) {
- s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
- v.PutRaw(TestHash, TestBlock)
- _, err := v.Get(ctx, TestHash, make([]byte, BlockSize))
- return err
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockRead(c *check.C) {
+ s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
+ v.BlockWriteRaw(TestHash, TestBlock)
+ return v.BlockRead(ctx, TestHash, brdiscard)
})
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelPut(c *check.C) {
- s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
- return v.Put(ctx, TestHash, make([]byte, BlockSize))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *check.C) {
+ s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
+ return v.BlockWrite(ctx, TestHash, make([]byte, BlockSize))
})
}
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelCompare(c *check.C) {
- s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
- v.PutRaw(TestHash, TestBlock)
- return v.Compare(ctx, TestHash, TestBlock2)
+func (s *stubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *testableAzureBlobVolume) error) {
+ v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+ Cluster: testCluster(c),
+ ConfigVolume: arvados.Volume{Replication: 3},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
})
-}
-
-func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *TestableAzureBlobVolume) error) {
- v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
defer v.Teardown()
v.azHandler.race = make(chan chan struct{})
@@ -633,8 +642,13 @@ func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, tes
}()
}
-func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
- volume := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestStats(c *check.C) {
+ volume := s.newTestableAzureBlobVolume(c, newVolumeParams{
+ Cluster: testCluster(c),
+ ConfigVolume: arvados.Volume{Replication: 3},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ })
defer volume.Teardown()
stats := func() string {
@@ -647,38 +661,38 @@ func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- _, err := volume.Get(context.Background(), loc, make([]byte, 3))
+ err := volume.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.NotNil)
c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
c.Check(stats(), check.Matches, `.*"storage\.AzureStorageServiceError 404 \(404 Not Found\)":[^0].*`)
c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
- err = volume.Put(context.Background(), loc, []byte("foo"))
+ err = volume.BlockWrite(context.Background(), loc, []byte("foo"))
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
- _, err = volume.Get(context.Background(), loc, make([]byte, 3))
+ err = volume.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.IsNil)
- _, err = volume.Get(context.Background(), loc, make([]byte, 3))
+ err = volume.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
}
-func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
- v.azHandler.PutRaw(v.ContainerName, locator, data)
+func (v *testableAzureBlobVolume) BlockWriteRaw(locator string, data []byte) {
+ v.azHandler.BlockWriteRaw(v.ContainerName, locator, data)
}
-func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastPut time.Time) {
- v.azHandler.TouchWithDate(v.ContainerName, locator, lastPut)
+func (v *testableAzureBlobVolume) TouchWithDate(locator string, lastBlockWrite time.Time) {
+ v.azHandler.TouchWithDate(v.ContainerName, locator, lastBlockWrite)
}
-func (v *TestableAzureBlobVolume) Teardown() {
+func (v *testableAzureBlobVolume) Teardown() {
v.azStub.Close()
}
-func (v *TestableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
return "get", "create"
}
diff --git a/services/keepstore/bufferpool.go b/services/keepstore/bufferpool.go
index b4cc5d38e1..811715b191 100644
--- a/services/keepstore/bufferpool.go
+++ b/services/keepstore/bufferpool.go
@@ -5,13 +5,17 @@
package keepstore
import (
+ "context"
"sync"
"sync/atomic"
"time"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
+var bufferPoolBlockSize = BlockSize // modified by tests
+
type bufferPool struct {
log logrus.FieldLogger
// limiter has a "true" placeholder for each in-use buffer.
@@ -22,17 +26,67 @@ type bufferPool struct {
sync.Pool
}
-func newBufferPool(log logrus.FieldLogger, count int, bufSize int) *bufferPool {
+func newBufferPool(log logrus.FieldLogger, count int, reg *prometheus.Registry) *bufferPool {
p := bufferPool{log: log}
p.Pool.New = func() interface{} {
- atomic.AddUint64(&p.allocated, uint64(bufSize))
- return make([]byte, bufSize)
+ atomic.AddUint64(&p.allocated, uint64(bufferPoolBlockSize))
+ return make([]byte, bufferPoolBlockSize)
}
p.limiter = make(chan bool, count)
+ if reg != nil {
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "bufferpool_allocated_bytes",
+ Help: "Number of bytes allocated to buffers",
+ },
+ func() float64 { return float64(p.Alloc()) },
+ ))
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "bufferpool_max_buffers",
+ Help: "Maximum number of buffers allowed",
+ },
+ func() float64 { return float64(p.Cap()) },
+ ))
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "bufferpool_inuse_buffers",
+ Help: "Number of buffers in use",
+ },
+ func() float64 { return float64(p.Len()) },
+ ))
+ }
return &p
}
-func (p *bufferPool) Get(size int) []byte {
+// GetContext gets a buffer from the pool -- but gives up and returns
+// ctx.Err() if ctx ends before a buffer is available.
+func (p *bufferPool) GetContext(ctx context.Context) ([]byte, error) {
+ bufReady := make(chan []byte)
+ go func() {
+ bufReady <- p.Get()
+ }()
+ select {
+ case buf := <-bufReady:
+ return buf, nil
+ case <-ctx.Done():
+ go func() {
+ // Even if closeNotifier happened first, we
+ // need to keep waiting for our buf so we can
+ // return it to the pool.
+ p.Put(<-bufReady)
+ }()
+ return nil, ctx.Err()
+ }
+}
+
+func (p *bufferPool) Get() []byte {
select {
case p.limiter <- true:
default:
@@ -42,14 +96,14 @@ func (p *bufferPool) Get(size int) []byte {
p.log.Printf("waited %v for a buffer", time.Since(t0))
}
buf := p.Pool.Get().([]byte)
- if cap(buf) < size {
- p.log.Fatalf("bufferPool Get(size=%d) but max=%d", size, cap(buf))
+	if len(buf) < bufferPoolBlockSize {
+		p.log.Fatalf("bufferPoolBlockSize=%d but len(buf)=%d", bufferPoolBlockSize, len(buf))
}
- return buf[:size]
+ return buf
}
func (p *bufferPool) Put(buf []byte) {
- p.Pool.Put(buf)
+ p.Pool.Put(buf[:cap(buf)])
<-p.limiter
}
diff --git a/services/keepstore/bufferpool_test.go b/services/keepstore/bufferpool_test.go
index 13e1cb4f33..8ecc833228 100644
--- a/services/keepstore/bufferpool_test.go
+++ b/services/keepstore/bufferpool_test.go
@@ -5,55 +5,54 @@
package keepstore
import (
- "context"
"time"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
. "gopkg.in/check.v1"
)
var _ = Suite(&BufferPoolSuite{})
+var bufferPoolTestSize = 10
+
type BufferPoolSuite struct{}
-// Initialize a default-sized buffer pool for the benefit of test
-// suites that don't run main().
-func init() {
- bufs = newBufferPool(ctxlog.FromContext(context.Background()), 12, BlockSize)
+func (s *BufferPoolSuite) SetUpTest(c *C) {
+ bufferPoolBlockSize = bufferPoolTestSize
}
-// Restore sane default after bufferpool's own tests
func (s *BufferPoolSuite) TearDownTest(c *C) {
- bufs = newBufferPool(ctxlog.FromContext(context.Background()), 12, BlockSize)
+ bufferPoolBlockSize = BlockSize
}
func (s *BufferPoolSuite) TestBufferPoolBufSize(c *C) {
- bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
- b1 := bufs.Get(1)
- bufs.Get(2)
+ bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+ b1 := bufs.Get()
+ bufs.Get()
bufs.Put(b1)
- b3 := bufs.Get(3)
- c.Check(len(b3), Equals, 3)
+ b3 := bufs.Get()
+ c.Check(len(b3), Equals, bufferPoolTestSize)
}
func (s *BufferPoolSuite) TestBufferPoolUnderLimit(c *C) {
- bufs := newBufferPool(ctxlog.TestLogger(c), 3, 10)
- b1 := bufs.Get(10)
- bufs.Get(10)
+ bufs := newBufferPool(ctxlog.TestLogger(c), 3, prometheus.NewRegistry())
+ b1 := bufs.Get()
+ bufs.Get()
testBufferPoolRace(c, bufs, b1, "Get")
}
func (s *BufferPoolSuite) TestBufferPoolAtLimit(c *C) {
- bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
- b1 := bufs.Get(10)
- bufs.Get(10)
+ bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+ b1 := bufs.Get()
+ bufs.Get()
testBufferPoolRace(c, bufs, b1, "Put")
}
func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string) {
race := make(chan string)
go func() {
- bufs.Get(10)
+ bufs.Get()
time.Sleep(time.Millisecond)
race <- "Get"
}()
@@ -68,9 +67,9 @@ func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string)
}
func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
- bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
- bufs.Get(10)
- last := bufs.Get(10)
+ bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+ bufs.Get()
+ last := bufs.Get()
// The buffer pool is allowed to throw away unused buffers
// (e.g., during sync.Pool's garbage collection hook, in the
// the current implementation). However, if unused buffers are
@@ -81,7 +80,7 @@ func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
reuses := 0
for i := 0; i < allocs; i++ {
bufs.Put(last)
- next := bufs.Get(10)
+ next := bufs.Get()
copy(last, []byte("last"))
copy(next, []byte("next"))
if last[0] == 'n' {
diff --git a/services/keepstore/collision.go b/services/keepstore/collision.go
deleted file mode 100644
index 16f2d09232..0000000000
--- a/services/keepstore/collision.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "crypto/md5"
- "fmt"
- "io"
-)
-
-// Compute the MD5 digest of a data block (consisting of buf1 + buf2 +
-// all bytes readable from rdr). If all data is read successfully,
-// return DiskHashError or CollisionError depending on whether it
-// matches expectMD5. If an error occurs while reading, return that
-// error.
-//
-// "content has expected MD5" is called a collision because this
-// function is used in cases where we have another block in hand with
-// the given MD5 but different content.
-func collisionOrCorrupt(expectMD5 string, buf1, buf2 []byte, rdr io.Reader) error {
- outcome := make(chan error)
- data := make(chan []byte, 1)
- go func() {
- h := md5.New()
- for b := range data {
- h.Write(b)
- }
- if fmt.Sprintf("%x", h.Sum(nil)) == expectMD5 {
- outcome <- CollisionError
- } else {
- outcome <- DiskHashError
- }
- }()
- data <- buf1
- if buf2 != nil {
- data <- buf2
- }
- var err error
- for rdr != nil && err == nil {
- buf := make([]byte, 1<<18)
- var n int
- n, err = rdr.Read(buf)
- data <- buf[:n]
- }
- close(data)
- if rdr != nil && err != io.EOF {
- <-outcome
- return err
- }
- return <-outcome
-}
-
-func compareReaderWithBuf(ctx context.Context, rdr io.Reader, expect []byte, hash string) error {
- bufLen := 1 << 20
- if bufLen > len(expect) && len(expect) > 0 {
- // No need for bufLen to be longer than
- // expect, except that len(buf)==0 would
- // prevent us from handling empty readers the
- // same way as non-empty readers: reading 0
- // bytes at a time never reaches EOF.
- bufLen = len(expect)
- }
- buf := make([]byte, bufLen)
- cmp := expect
-
- // Loop invariants: all data read so far matched what
- // we expected, and the first N bytes of cmp are
- // expected to equal the next N bytes read from
- // rdr.
- for {
- ready := make(chan bool)
- var n int
- var err error
- go func() {
- n, err = rdr.Read(buf)
- close(ready)
- }()
- select {
- case <-ready:
- case <-ctx.Done():
- return ctx.Err()
- }
- if n > len(cmp) || bytes.Compare(cmp[:n], buf[:n]) != 0 {
- return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], buf[:n], rdr)
- }
- cmp = cmp[n:]
- if err == io.EOF {
- if len(cmp) != 0 {
- return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], nil, nil)
- }
- return nil
- } else if err != nil {
- return err
- }
- }
-}
diff --git a/services/keepstore/collision_test.go b/services/keepstore/collision_test.go
deleted file mode 100644
index aa8f0cbaa1..0000000000
--- a/services/keepstore/collision_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "testing/iotest"
-
- check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&CollisionSuite{})
-
-type CollisionSuite struct{}
-
-func (s *CollisionSuite) TestCollisionOrCorrupt(c *check.C) {
- fooMD5 := "acbd18db4cc2f85cedef654fccc4a4d8"
-
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o'}, bytes.NewBufferString("o")),
- check.Equals, CollisionError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, nil, bytes.NewBufferString("oo")),
- check.Equals, CollisionError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o', 'o'}, nil),
- check.Equals, CollisionError)
- c.Check(collisionOrCorrupt(fooMD5, nil, []byte{}, bytes.NewBufferString("foo")),
- check.Equals, CollisionError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("")),
- check.Equals, CollisionError)
- c.Check(collisionOrCorrupt(fooMD5, nil, nil, iotest.NewReadLogger("foo: ", iotest.DataErrReader(iotest.OneByteReader(bytes.NewBufferString("foo"))))),
- check.Equals, CollisionError)
-
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("bar")),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, nil),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, bytes.NewBufferString("")),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O'}, nil, bytes.NewBufferString("o")),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O', 'o'}, nil, nil),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, []byte{'O'}, nil),
- check.Equals, DiskHashError)
- c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, bytes.NewBufferString("O")),
- check.Equals, DiskHashError)
-
- c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, iotest.TimeoutReader(iotest.OneByteReader(bytes.NewBufferString("foo")))),
- check.Equals, iotest.ErrTimeout)
-}
diff --git a/services/keepstore/command.go b/services/keepstore/command.go
index 555f16dfe1..9f14c13384 100644
--- a/services/keepstore/command.go
+++ b/services/keepstore/command.go
@@ -7,210 +7,27 @@ package keepstore
import (
"context"
"errors"
- "flag"
- "fmt"
- "io"
- "math/rand"
- "net/http"
- "os"
- "sync"
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
)
var (
Command = service.Command(arvados.ServiceNameKeepstore, newHandlerOrErrorHandler)
)
-func runCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
- args, ok, code := convertKeepstoreFlagsToServiceFlags(prog, args, ctxlog.FromContext(context.Background()), stderr)
- if !ok {
- return code
- }
- return Command.RunCommand(prog, args, stdin, stdout, stderr)
-}
-
-// Parse keepstore command line flags, and return equivalent
-// service.Command flags. If the second return value ("ok") is false,
-// the program should exit, and the third return value is a suitable
-// exit code.
-func convertKeepstoreFlagsToServiceFlags(prog string, args []string, lgr logrus.FieldLogger, stderr io.Writer) ([]string, bool, int) {
- flags := flag.NewFlagSet("", flag.ContinueOnError)
- flags.String("listen", "", "Services.Keepstore.InternalURLs")
- flags.Int("max-buffers", 0, "API.MaxKeepBlobBuffers")
- flags.Int("max-requests", 0, "API.MaxConcurrentRequests")
- flags.Bool("never-delete", false, "Collections.BlobTrash")
- flags.Bool("enforce-permissions", false, "Collections.BlobSigning")
- flags.String("permission-key-file", "", "Collections.BlobSigningKey")
- flags.String("blob-signing-key-file", "", "Collections.BlobSigningKey")
- flags.String("data-manager-token-file", "", "SystemRootToken")
- flags.Int("permission-ttl", 0, "Collections.BlobSigningTTL")
- flags.Int("blob-signature-ttl", 0, "Collections.BlobSigningTTL")
- flags.String("trash-lifetime", "", "Collections.BlobTrashLifetime")
- flags.Bool("serialize", false, "Volumes.*.DriverParameters.Serialize")
- flags.Bool("readonly", false, "Volumes.*.ReadOnly")
- flags.String("pid", "", "-")
- flags.String("trash-check-interval", "", "Collections.BlobTrashCheckInterval")
-
- flags.String("azure-storage-container-volume", "", "Volumes.*.Driver")
- flags.String("azure-storage-account-name", "", "Volumes.*.DriverParameters.StorageAccountName")
- flags.String("azure-storage-account-key-file", "", "Volumes.*.DriverParameters.StorageAccountKey")
- flags.String("azure-storage-replication", "", "Volumes.*.Replication")
- flags.String("azure-max-get-bytes", "", "Volumes.*.DriverParameters.MaxDataReadSize")
-
- flags.String("s3-bucket-volume", "", "Volumes.*.DriverParameters.Bucket")
- flags.String("s3-region", "", "Volumes.*.DriverParameters.Region")
- flags.String("s3-endpoint", "", "Volumes.*.DriverParameters.Endpoint")
- flags.String("s3-access-key-file", "", "Volumes.*.DriverParameters.AccessKeyID")
- flags.String("s3-secret-key-file", "", "Volumes.*.DriverParameters.SecretAccessKey")
- flags.String("s3-race-window", "", "Volumes.*.DriverParameters.RaceWindow")
- flags.String("s3-replication", "", "Volumes.*.Replication")
- flags.String("s3-unsafe-delete", "", "Volumes.*.DriverParameters.UnsafeDelete")
-
- flags.String("volume", "", "Volumes")
-
- flags.Bool("version", false, "")
- flags.String("config", "", "")
- flags.String("legacy-keepstore-config", "", "")
-
- if ok, code := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
- return nil, false, code
- }
-
- args = nil
- ok := true
- flags.Visit(func(f *flag.Flag) {
- if f.Name == "config" || f.Name == "legacy-keepstore-config" || f.Name == "version" {
- args = append(args, "-"+f.Name, f.Value.String())
- } else if f.Usage == "-" {
- ok = false
- lgr.Errorf("command line flag -%s is no longer supported", f.Name)
- } else {
- ok = false
- lgr.Errorf("command line flag -%s is no longer supported -- use Clusters.*.%s in cluster config file instead", f.Name, f.Usage)
- }
- })
- if !ok {
- return nil, false, 2
- }
-
- flags = flag.NewFlagSet("", flag.ContinueOnError)
- loader := config.NewLoader(nil, lgr)
- loader.SetupFlags(flags)
- return loader.MungeLegacyConfigArgs(lgr, args, "-legacy-keepstore-config"), true, 0
-}
-
-type handler struct {
- http.Handler
- Cluster *arvados.Cluster
- Logger logrus.FieldLogger
-
- pullq *WorkQueue
- trashq *WorkQueue
- volmgr *RRVolumeManager
- keepClient *keepclient.KeepClient
-
- err error
- setupOnce sync.Once
-}
-
-func (h *handler) CheckHealth() error {
- return h.err
-}
-
-func (h *handler) Done() <-chan struct{} {
- return nil
-}
-
func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
- var h handler
serviceURL, ok := service.URLFromContext(ctx)
if !ok {
return service.ErrorHandler(ctx, cluster, errors.New("BUG: no URL from service.URLFromContext"))
}
- err := h.setup(ctx, cluster, token, reg, serviceURL)
+ ks, err := newKeepstore(ctx, cluster, token, reg, serviceURL)
if err != nil {
return service.ErrorHandler(ctx, cluster, err)
}
- return &h
-}
-
-func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) error {
- h.Cluster = cluster
- h.Logger = ctxlog.FromContext(ctx)
- if h.Cluster.API.MaxKeepBlobBuffers <= 0 {
- return fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
- }
- bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
-
- if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
- h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
- }
-
- if h.Cluster.Collections.BlobSigningKey != "" {
- } else if h.Cluster.Collections.BlobSigning {
- return errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
- } else {
- h.Logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")
- }
-
- if len(h.Cluster.Volumes) == 0 {
- return errors.New("no volumes configured")
- }
-
- h.Logger.Printf("keepstore %s starting, pid %d", cmd.Version.String(), os.Getpid())
-
- // Start a round-robin VolumeManager with the configured volumes.
- vm, err := makeRRVolumeManager(h.Logger, h.Cluster, serviceURL, newVolumeMetricsVecs(reg))
- if err != nil {
- return err
- }
- if len(vm.readables) == 0 {
- return fmt.Errorf("no volumes configured for %s", serviceURL)
- }
- h.volmgr = vm
-
- // Initialize the pullq and workers
- h.pullq = NewWorkQueue()
- for i := 0; i < 1 || i < h.Cluster.Collections.BlobReplicateConcurrency; i++ {
- go h.runPullWorker(h.pullq)
- }
-
- // Initialize the trashq and workers
- h.trashq = NewWorkQueue()
- for i := 0; i < 1 || i < h.Cluster.Collections.BlobTrashConcurrency; i++ {
- go RunTrashWorker(h.volmgr, h.Logger, h.Cluster, h.trashq)
- }
-
- // Set up routes and metrics
- h.Handler = MakeRESTRouter(ctx, cluster, reg, vm, h.pullq, h.trashq)
-
- // Initialize keepclient for pull workers
- c, err := arvados.NewClientFromConfig(cluster)
- if err != nil {
- return err
- }
- ac, err := arvadosclient.New(c)
- if err != nil {
- return err
- }
- h.keepClient = &keepclient.KeepClient{
- Arvados: ac,
- Want_replicas: 1,
- }
- h.keepClient.Arvados.ApiToken = fmt.Sprintf("%x", rand.Int63())
-
- if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 {
- go emptyTrash(h.volmgr.writables, d)
- }
-
- return nil
+ puller := newPuller(ctx, ks, reg)
+ trasher := newTrasher(ctx, ks, reg)
+ _ = newTrashEmptier(ctx, ks, reg)
+ return newRouter(ks, puller, trasher)
}
diff --git a/services/keepstore/command_test.go b/services/keepstore/command_test.go
index bbfae52f69..942c01a779 100644
--- a/services/keepstore/command_test.go
+++ b/services/keepstore/command_test.go
@@ -23,7 +23,7 @@ func (*CommandSuite) TestLegacyConfigPath(c *check.C) {
defer os.Remove(tmp.Name())
tmp.Write([]byte("Listen: \"1.2.3.4.5:invalidport\"\n"))
tmp.Close()
- exited := runCommand("keepstore", []string{"-config", tmp.Name()}, &stdin, &stdout, &stderr)
+ exited := Command.RunCommand("keepstore", []string{"-config", tmp.Name()}, &stdin, &stdout, &stderr)
c.Check(exited, check.Equals, 1)
c.Check(stderr.String(), check.Matches, `(?ms).*unable to migrate Listen value.*`)
}
diff --git a/services/keepstore/count.go b/services/keepstore/count.go
index 700ca19dec..51434a803e 100644
--- a/services/keepstore/count.go
+++ b/services/keepstore/count.go
@@ -8,21 +8,21 @@ import (
"io"
)
-func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+func newCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
return &countingReadWriter{
writer: w,
counter: f,
}
}
-func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+func newCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
return &countingReadWriter{
reader: r,
counter: f,
}
}
-func NewCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
+func newCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
return &countingReaderAtSeeker{readerAtSeeker: r, counter: f}
}
diff --git a/services/keepstore/gocheck_test.go b/services/keepstore/gocheck_test.go
deleted file mode 100644
index 90076db5b2..0000000000
--- a/services/keepstore/gocheck_test.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "gopkg.in/check.v1"
- "testing"
-)
-
-func TestGocheck(t *testing.T) {
- check.TestingT(t)
-}
diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
deleted file mode 100644
index d545bde0ab..0000000000
--- a/services/keepstore/handler_test.go
+++ /dev/null
@@ -1,1411 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Tests for Keep HTTP handlers:
-//
-// GetBlockHandler
-// PutBlockHandler
-// IndexHandler
-//
-// The HTTP handlers are responsible for enforcing permission policy,
-// so these tests must exercise all possible permission permutations.
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "sort"
- "strings"
- "sync/atomic"
- "time"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/prometheus/client_golang/prometheus"
- check "gopkg.in/check.v1"
-)
-
-var testServiceURL = func() arvados.URL {
- return arvados.URL{Host: "localhost:12345", Scheme: "http"}
-}()
-
-func testCluster(t TB) *arvados.Cluster {
- cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
- if err != nil {
- t.Fatal(err)
- }
- cluster, err := cfg.GetCluster("")
- if err != nil {
- t.Fatal(err)
- }
- cluster.SystemRootToken = arvadostest.SystemRootToken
- cluster.ManagementToken = arvadostest.ManagementToken
- cluster.Collections.BlobSigning = false
- return cluster
-}
-
-var _ = check.Suite(&HandlerSuite{})
-
-type HandlerSuite struct {
- cluster *arvados.Cluster
- handler *handler
-}
-
-func (s *HandlerSuite) SetUpTest(c *check.C) {
- s.cluster = testCluster(c)
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
- "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
- }
- s.handler = &handler{}
-}
-
-// A RequestTester represents the parameters for an HTTP request to
-// be issued on behalf of a unit test.
-type RequestTester struct {
- uri string
- apiToken string
- method string
- requestBody []byte
- storageClasses string
-}
-
-// Test GetBlockHandler on the following situations:
-// - permissions off, unauthenticated request, unsigned locator
-// - permissions on, authenticated request, signed locator
-// - permissions on, authenticated request, unsigned locator
-// - permissions on, unauthenticated request, signed locator
-// - permissions on, authenticated request, expired locator
-// - permissions on, authenticated request, signed locator, transient error from backend
-//
-func (s *HandlerSuite) TestGetHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- vols := s.handler.volmgr.AllWritable()
- err := vols[0].Put(context.Background(), TestHash, TestBlock)
- c.Check(err, check.IsNil)
-
- // Create locators for testing.
- // Turn on permission settings so we can generate signed locators.
- s.cluster.Collections.BlobSigning = true
- s.cluster.Collections.BlobSigningKey = knownKey
- s.cluster.Collections.BlobSigningTTL.Set("5m")
-
- var (
- unsignedLocator = "/" + TestHash
- validTimestamp = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
- expiredTimestamp = time.Now().Add(-time.Hour)
- signedLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
- expiredLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
- )
-
- // -----------------
- // Test unauthenticated request with permissions off.
- s.cluster.Collections.BlobSigning = false
-
- // Unauthenticated request, unsigned locator
- // => OK
- response := IssueRequest(s.handler,
- &RequestTester{
- method: "GET",
- uri: unsignedLocator,
- })
- ExpectStatusCode(c,
- "Unauthenticated request, unsigned locator", http.StatusOK, response)
- ExpectBody(c,
- "Unauthenticated request, unsigned locator",
- string(TestBlock),
- response)
-
- receivedLen := response.Header().Get("Content-Length")
- expectedLen := fmt.Sprintf("%d", len(TestBlock))
- if receivedLen != expectedLen {
- c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
- }
-
- // ----------------
- // Permissions: on.
- s.cluster.Collections.BlobSigning = true
-
- // Authenticated request, signed locator
- // => OK
- response = IssueRequest(s.handler, &RequestTester{
- method: "GET",
- uri: signedLocator,
- apiToken: knownToken,
- })
- ExpectStatusCode(c,
- "Authenticated request, signed locator", http.StatusOK, response)
- ExpectBody(c,
- "Authenticated request, signed locator", string(TestBlock), response)
-
- receivedLen = response.Header().Get("Content-Length")
- expectedLen = fmt.Sprintf("%d", len(TestBlock))
- if receivedLen != expectedLen {
- c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
- }
-
- // Authenticated request, unsigned locator
- // => PermissionError
- response = IssueRequest(s.handler, &RequestTester{
- method: "GET",
- uri: unsignedLocator,
- apiToken: knownToken,
- })
- ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
-
- // Unauthenticated request, signed locator
- // => PermissionError
- response = IssueRequest(s.handler, &RequestTester{
- method: "GET",
- uri: signedLocator,
- })
- ExpectStatusCode(c,
- "Unauthenticated request, signed locator",
- PermissionError.HTTPCode, response)
-
- // Authenticated request, expired locator
- // => ExpiredError
- response = IssueRequest(s.handler, &RequestTester{
- method: "GET",
- uri: expiredLocator,
- apiToken: knownToken,
- })
- ExpectStatusCode(c,
- "Authenticated request, expired locator",
- ExpiredError.HTTPCode, response)
-
- // Authenticated request, signed locator
- // => 503 Server busy (transient error)
-
- // Set up the block owning volume to respond with errors
- vols[0].Volume.(*MockVolume).Bad = true
- vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
- response = IssueRequest(s.handler, &RequestTester{
- method: "GET",
- uri: signedLocator,
- apiToken: knownToken,
- })
- // A transient error from one volume while the other doesn't find the block
- // should make the service return a 503 so that clients can retry.
- ExpectStatusCode(c,
- "Volume backend busy",
- 503, response)
-}
-
-// Test PutBlockHandler on the following situations:
-// - no server key
-// - with server key, authenticated request, unsigned locator
-// - with server key, unauthenticated request, unsigned locator
-//
-func (s *HandlerSuite) TestPutHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- // --------------
- // No server key.
-
- s.cluster.Collections.BlobSigningKey = ""
-
- // Unauthenticated request, no server key
- // => OK (unsigned response)
- unsignedLocator := "/" + TestHash
- response := IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: unsignedLocator,
- requestBody: TestBlock,
- })
-
- ExpectStatusCode(c,
- "Unauthenticated request, no server key", http.StatusOK, response)
- ExpectBody(c,
- "Unauthenticated request, no server key",
- TestHashPutResp, response)
-
- // ------------------
- // With a server key.
-
- s.cluster.Collections.BlobSigningKey = knownKey
- s.cluster.Collections.BlobSigningTTL.Set("5m")
-
- // When a permission key is available, the locator returned
- // from an authenticated PUT request will be signed.
-
- // Authenticated PUT, signed locator
- // => OK (signed response)
- response = IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: unsignedLocator,
- requestBody: TestBlock,
- apiToken: knownToken,
- })
-
- ExpectStatusCode(c,
- "Authenticated PUT, signed locator, with server key",
- http.StatusOK, response)
- responseLocator := strings.TrimSpace(response.Body.String())
- if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
- c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
- "response '%s' does not contain a valid signature",
- responseLocator)
- }
-
- // Unauthenticated PUT, unsigned locator
- // => OK
- response = IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: unsignedLocator,
- requestBody: TestBlock,
- })
-
- ExpectStatusCode(c,
- "Unauthenticated PUT, unsigned locator, with server key",
- http.StatusOK, response)
- ExpectBody(c,
- "Unauthenticated PUT, unsigned locator, with server key",
- TestHashPutResp, response)
-}
-
-func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
- s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- s.cluster.SystemRootToken = "fake-data-manager-token"
- IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- })
-
- s.cluster.Collections.BlobTrash = true
- IssueRequest(s.handler,
- &RequestTester{
- method: "DELETE",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- apiToken: s.cluster.SystemRootToken,
- })
- type expect struct {
- volid string
- method string
- callcount int
- }
- for _, e := range []expect{
- {"zzzzz-nyw5e-000000000000000", "Get", 0},
- {"zzzzz-nyw5e-000000000000000", "Compare", 0},
- {"zzzzz-nyw5e-000000000000000", "Touch", 0},
- {"zzzzz-nyw5e-000000000000000", "Put", 0},
- {"zzzzz-nyw5e-000000000000000", "Delete", 0},
- {"zzzzz-nyw5e-111111111111111", "Get", 0},
- {"zzzzz-nyw5e-111111111111111", "Compare", 1},
- {"zzzzz-nyw5e-111111111111111", "Touch", 1},
- {"zzzzz-nyw5e-111111111111111", "Put", 1},
- {"zzzzz-nyw5e-111111111111111", "Delete", 1},
- } {
- if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
- c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
- }
- }
-}
-
-func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-111111111111111": {
- Driver: "mock",
- Replication: 1,
- StorageClasses: map[string]bool{"class1": true}},
- "zzzzz-nyw5e-222222222222222": {
- Driver: "mock",
- Replication: 1,
- StorageClasses: map[string]bool{"class2": true, "class3": true}},
- }
-
- for _, trial := range []struct {
- priority1 int // priority of class1, thus vol1
- priority2 int // priority of class2
- priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
- get1 int // expected number of "get" ops on vol1
- get2 int // expected number of "get" ops on vol2
- }{
- {100, 50, 50, 1, 0}, // class1 has higher priority => try vol1 first, no need to try vol2
- {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
- {66, 99, 33, 1, 1}, // class2 has higher priority => try vol2 first, then try vol1
- {66, 33, 99, 1, 1}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
- } {
- c.Logf("%+v", trial)
- s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
- "class1": {Priority: trial.priority1},
- "class2": {Priority: trial.priority2},
- "class3": {Priority: trial.priority3},
- }
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- storageClasses: "class1",
- })
- IssueRequest(s.handler,
- &RequestTester{
- method: "GET",
- uri: "/" + TestHash,
- })
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
- }
-}
-
-func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-111111111111111": {
- Driver: "mock",
- Replication: 1,
- ReadOnly: true,
- StorageClasses: map[string]bool{"class1": true}},
- }
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- resp := IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- storageClasses: "class1",
- })
- c.Check(resp.Code, check.Equals, FullError.HTTPCode)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
-}
-
-func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-111111111111111": {
- Driver: "mock",
- Replication: 1,
- StorageClasses: map[string]bool{"class1": true}},
- "zzzzz-nyw5e-121212121212121": {
- Driver: "mock",
- Replication: 1,
- StorageClasses: map[string]bool{"class1": true, "class2": true}},
- "zzzzz-nyw5e-222222222222222": {
- Driver: "mock",
- Replication: 1,
- StorageClasses: map[string]bool{"class2": true}},
- }
-
- for _, trial := range []struct {
- setCounter uint32 // value to stuff vm.counter, to control offset
- classes string // desired classes
- put111 int // expected number of "put" ops on 11111... after 2x put reqs
- put121 int // expected number of "put" ops on 12121...
- put222 int // expected number of "put" ops on 22222...
- cmp111 int // expected number of "compare" ops on 11111... after 2x put reqs
- cmp121 int // expected number of "compare" ops on 12121...
- cmp222 int // expected number of "compare" ops on 22222...
- }{
- {0, "class1",
- 1, 0, 0,
- 2, 1, 0}, // first put compares on all vols with class2; second put succeeds after checking 121
- {0, "class2",
- 0, 1, 0,
- 0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
- {0, "class1,class2",
- 1, 1, 0,
- 2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
- {1, "class1,class2",
- 0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
- 2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
- {0, "class1,class2,class404",
- 1, 1, 0,
- 2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
- } {
- c.Logf("%+v", trial)
- s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
- "class1": {},
- "class2": {},
- "class3": {},
- }
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
- for i := 0; i < 2; i++ {
- IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- storageClasses: trial.classes,
- })
- }
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
- c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
- }
-}
-
-// Test TOUCH requests.
-func (s *HandlerSuite) TestTouchHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- vols := s.handler.volmgr.AllWritable()
- vols[0].Put(context.Background(), TestHash, TestBlock)
- vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
- afterPut := time.Now()
- t, err := vols[0].Mtime(TestHash)
- c.Assert(err, check.IsNil)
- c.Assert(t.Before(afterPut), check.Equals, true)
-
- ExpectStatusCode(c,
- "touch with no credentials",
- http.StatusUnauthorized,
- IssueRequest(s.handler, &RequestTester{
- method: "TOUCH",
- uri: "/" + TestHash,
- }))
-
- ExpectStatusCode(c,
- "touch with non-root credentials",
- http.StatusUnauthorized,
- IssueRequest(s.handler, &RequestTester{
- method: "TOUCH",
- uri: "/" + TestHash,
- apiToken: arvadostest.ActiveTokenV2,
- }))
-
- ExpectStatusCode(c,
- "touch non-existent block",
- http.StatusNotFound,
- IssueRequest(s.handler, &RequestTester{
- method: "TOUCH",
- uri: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- apiToken: s.cluster.SystemRootToken,
- }))
-
- beforeTouch := time.Now()
- ExpectStatusCode(c,
- "touch block",
- http.StatusOK,
- IssueRequest(s.handler, &RequestTester{
- method: "TOUCH",
- uri: "/" + TestHash,
- apiToken: s.cluster.SystemRootToken,
- }))
- t, err = vols[0].Mtime(TestHash)
- c.Assert(err, check.IsNil)
- c.Assert(t.After(beforeTouch), check.Equals, true)
-}
-
-// Test /index requests:
-// - unauthenticated /index request
-// - unauthenticated /index/prefix request
-// - authenticated /index request | non-superuser
-// - authenticated /index/prefix request | non-superuser
-// - authenticated /index request | superuser
-// - authenticated /index/prefix request | superuser
-//
-// The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of BlobSigning.
-//
-func (s *HandlerSuite) TestIndexHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- // Include multiple blocks on different volumes, and
- // some metadata files (which should be omitted from index listings)
- vols := s.handler.volmgr.AllWritable()
- vols[0].Put(context.Background(), TestHash, TestBlock)
- vols[1].Put(context.Background(), TestHash2, TestBlock2)
- vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
- vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
-
- s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
- unauthenticatedReq := &RequestTester{
- method: "GET",
- uri: "/index",
- }
- authenticatedReq := &RequestTester{
- method: "GET",
- uri: "/index",
- apiToken: knownToken,
- }
- superuserReq := &RequestTester{
- method: "GET",
- uri: "/index",
- apiToken: s.cluster.SystemRootToken,
- }
- unauthPrefixReq := &RequestTester{
- method: "GET",
- uri: "/index/" + TestHash[0:3],
- }
- authPrefixReq := &RequestTester{
- method: "GET",
- uri: "/index/" + TestHash[0:3],
- apiToken: knownToken,
- }
- superuserPrefixReq := &RequestTester{
- method: "GET",
- uri: "/index/" + TestHash[0:3],
- apiToken: s.cluster.SystemRootToken,
- }
- superuserNoSuchPrefixReq := &RequestTester{
- method: "GET",
- uri: "/index/abcd",
- apiToken: s.cluster.SystemRootToken,
- }
- superuserInvalidPrefixReq := &RequestTester{
- method: "GET",
- uri: "/index/xyz",
- apiToken: s.cluster.SystemRootToken,
- }
-
- // -------------------------------------------------------------
- // Only the superuser should be allowed to issue /index requests.
-
- // ---------------------------
- // BlobSigning enabled
- // This setting should not affect tests passing.
- s.cluster.Collections.BlobSigning = true
-
- // unauthenticated /index request
- // => UnauthorizedError
- response := IssueRequest(s.handler, unauthenticatedReq)
- ExpectStatusCode(c,
- "permissions on, unauthenticated request",
- UnauthorizedError.HTTPCode,
- response)
-
- // unauthenticated /index/prefix request
- // => UnauthorizedError
- response = IssueRequest(s.handler, unauthPrefixReq)
- ExpectStatusCode(c,
- "permissions on, unauthenticated /index/prefix request",
- UnauthorizedError.HTTPCode,
- response)
-
- // authenticated /index request, non-superuser
- // => UnauthorizedError
- response = IssueRequest(s.handler, authenticatedReq)
- ExpectStatusCode(c,
- "permissions on, authenticated request, non-superuser",
- UnauthorizedError.HTTPCode,
- response)
-
- // authenticated /index/prefix request, non-superuser
- // => UnauthorizedError
- response = IssueRequest(s.handler, authPrefixReq)
- ExpectStatusCode(c,
- "permissions on, authenticated /index/prefix request, non-superuser",
- UnauthorizedError.HTTPCode,
- response)
-
- // superuser /index request
- // => OK
- response = IssueRequest(s.handler, superuserReq)
- ExpectStatusCode(c,
- "permissions on, superuser request",
- http.StatusOK,
- response)
-
- // ----------------------------
- // BlobSigning disabled
- // Valid Request should still pass.
- s.cluster.Collections.BlobSigning = false
-
- // superuser /index request
- // => OK
- response = IssueRequest(s.handler, superuserReq)
- ExpectStatusCode(c,
- "permissions on, superuser request",
- http.StatusOK,
- response)
-
- expected := `^` + TestHash + `\+\d+ \d+\n` +
- TestHash2 + `\+\d+ \d+\n\n$`
- c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
- "permissions on, superuser request"))
-
- // superuser /index/prefix request
- // => OK
- response = IssueRequest(s.handler, superuserPrefixReq)
- ExpectStatusCode(c,
- "permissions on, superuser request",
- http.StatusOK,
- response)
-
- expected = `^` + TestHash + `\+\d+ \d+\n\n$`
- c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
- "permissions on, superuser /index/prefix request"))
-
- // superuser /index/{no-such-prefix} request
- // => OK
- response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
- ExpectStatusCode(c,
- "permissions on, superuser request",
- http.StatusOK,
- response)
-
- if "\n" != response.Body.String() {
- c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
- }
-
- // superuser /index/{invalid-prefix} request
- // => StatusBadRequest
- response = IssueRequest(s.handler, superuserInvalidPrefixReq)
- ExpectStatusCode(c,
- "permissions on, superuser request",
- http.StatusBadRequest,
- response)
-}
-
-// TestDeleteHandler
-//
-// Cases tested:
-//
-// With no token and with a non-data-manager token:
-// * Delete existing block
-// (test for 403 Forbidden, confirm block not deleted)
-//
-// With data manager token:
-//
-// * Delete existing block
-// (test for 200 OK, response counts, confirm block deleted)
-//
-// * Delete nonexistent block
-// (test for 200 OK, response counts)
-//
-// TODO(twp):
-//
-// * Delete block on read-only and read-write volume
-// (test for 200 OK, response with copies_deleted=1,
-// copies_failed=1, confirm block deleted only on r/w volume)
-//
-// * Delete block on read-only volume only
-// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-// confirm block not deleted)
-//
-func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- vols := s.handler.volmgr.AllWritable()
- vols[0].Put(context.Background(), TestHash, TestBlock)
-
- // Explicitly set the BlobSigningTTL to 0 for these
- // tests, to ensure the MockVolume deletes the blocks
- // even though they have just been created.
- s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
-
- var userToken = "NOT DATA MANAGER TOKEN"
- s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
- s.cluster.Collections.BlobTrash = true
-
- unauthReq := &RequestTester{
- method: "DELETE",
- uri: "/" + TestHash,
- }
-
- userReq := &RequestTester{
- method: "DELETE",
- uri: "/" + TestHash,
- apiToken: userToken,
- }
-
- superuserExistingBlockReq := &RequestTester{
- method: "DELETE",
- uri: "/" + TestHash,
- apiToken: s.cluster.SystemRootToken,
- }
-
- superuserNonexistentBlockReq := &RequestTester{
- method: "DELETE",
- uri: "/" + TestHash2,
- apiToken: s.cluster.SystemRootToken,
- }
-
- // Unauthenticated request returns PermissionError.
- var response *httptest.ResponseRecorder
- response = IssueRequest(s.handler, unauthReq)
- ExpectStatusCode(c,
- "unauthenticated request",
- PermissionError.HTTPCode,
- response)
-
- // Authenticated non-admin request returns PermissionError.
- response = IssueRequest(s.handler, userReq)
- ExpectStatusCode(c,
- "authenticated non-admin request",
- PermissionError.HTTPCode,
- response)
-
- // Authenticated admin request for nonexistent block.
- type deletecounter struct {
- Deleted int `json:"copies_deleted"`
- Failed int `json:"copies_failed"`
- }
- var responseDc, expectedDc deletecounter
-
- response = IssueRequest(s.handler, superuserNonexistentBlockReq)
- ExpectStatusCode(c,
- "data manager request, nonexistent block",
- http.StatusNotFound,
- response)
-
- // Authenticated admin request for existing block while BlobTrash is false.
- s.cluster.Collections.BlobTrash = false
- response = IssueRequest(s.handler, superuserExistingBlockReq)
- ExpectStatusCode(c,
- "authenticated request, existing block, method disabled",
- MethodDisabledError.HTTPCode,
- response)
- s.cluster.Collections.BlobTrash = true
-
- // Authenticated admin request for existing block.
- response = IssueRequest(s.handler, superuserExistingBlockReq)
- ExpectStatusCode(c,
- "data manager request, existing block",
- http.StatusOK,
- response)
- // Expect response {"copies_deleted":1,"copies_failed":0}
- expectedDc = deletecounter{1, 0}
- json.NewDecoder(response.Body).Decode(&responseDc)
- if responseDc != expectedDc {
- c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
- expectedDc, responseDc)
- }
- // Confirm the block has been deleted
- buf := make([]byte, BlockSize)
- _, err := vols[0].Get(context.Background(), TestHash, buf)
- var blockDeleted = os.IsNotExist(err)
- if !blockDeleted {
- c.Error("superuserExistingBlockReq: block not deleted")
- }
-
- // A DELETE request on a block newer than BlobSigningTTL
- // should return success but leave the block on the volume.
- vols[0].Put(context.Background(), TestHash, TestBlock)
- s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
-
- response = IssueRequest(s.handler, superuserExistingBlockReq)
- ExpectStatusCode(c,
- "data manager request, existing block",
- http.StatusOK,
- response)
- // Expect response {"copies_deleted":1,"copies_failed":0}
- expectedDc = deletecounter{1, 0}
- json.NewDecoder(response.Body).Decode(&responseDc)
- if responseDc != expectedDc {
- c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
- expectedDc, responseDc)
- }
- // Confirm the block has NOT been deleted.
- _, err = vols[0].Get(context.Background(), TestHash, buf)
- if err != nil {
- c.Errorf("testing delete on new block: %s\n", err)
- }
-}
-
-// TestPullHandler
-//
-// Test handling of the PUT /pull statement.
-//
-// Cases tested: syntactically valid and invalid pull lists, from the
-// data manager and from unprivileged users:
-//
-// 1. Valid pull list from an ordinary user
-// (expected result: 401 Unauthorized)
-//
-// 2. Invalid pull request from an ordinary user
-// (expected result: 401 Unauthorized)
-//
-// 3. Valid pull request from the data manager
-// (expected result: 200 OK with request body "Received 3 pull
-// requests"
-//
-// 4. Invalid pull request from the data manager
-// (expected result: 400 Bad Request)
-//
-// Test that in the end, the pull manager received a good pull list with
-// the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously. Make sure that none of them return 400
-// Bad Request and that pullq.GetList() returns a valid list.
-//
-func (s *HandlerSuite) TestPullHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- // Replace the router's pullq -- which the worker goroutines
- // started by setup() are now receiving from -- with a new
- // one, so we can see what the handler sends to it.
- pullq := NewWorkQueue()
- s.handler.Handler.(*router).pullq = pullq
-
- var userToken = "USER TOKEN"
- s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
- goodJSON := []byte(`[
- {
- "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
- "servers":[
- "http://server1",
- "http://server2"
- ]
- },
- {
- "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
- "servers":[]
- },
- {
- "locator":"cccccccccccccccccccccccccccccccc+12345",
- "servers":["http://server1"]
- }
- ]`)
-
- badJSON := []byte(`{ "key":"I'm a little teapot" }`)
-
- type pullTest struct {
- name string
- req RequestTester
- responseCode int
- responseBody string
- }
- var testcases = []pullTest{
- {
- "Valid pull list from an ordinary user",
- RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
- http.StatusUnauthorized,
- "Unauthorized\n",
- },
- {
- "Invalid pull request from an ordinary user",
- RequestTester{"/pull", userToken, "PUT", badJSON, ""},
- http.StatusUnauthorized,
- "Unauthorized\n",
- },
- {
- "Valid pull request from the data manager",
- RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
- http.StatusOK,
- "Received 3 pull requests\n",
- },
- {
- "Invalid pull request from the data manager",
- RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
- http.StatusBadRequest,
- "",
- },
- }
-
- for _, tst := range testcases {
- response := IssueRequest(s.handler, &tst.req)
- ExpectStatusCode(c, tst.name, tst.responseCode, response)
- ExpectBody(c, tst.name, tst.responseBody, response)
- }
-
- // The Keep pull manager should have received one good list with 3
- // requests on it.
- for i := 0; i < 3; i++ {
- var item interface{}
- select {
- case item = <-pullq.NextItem:
- case <-time.After(time.Second):
- c.Error("timed out")
- }
- if _, ok := item.(PullRequest); !ok {
- c.Errorf("item %v could not be parsed as a PullRequest", item)
- }
- }
-
- expectChannelEmpty(c, pullq.NextItem)
-}
-
-// TestTrashHandler
-//
-// Test cases:
-//
-// Cases tested: syntactically valid and invalid trash lists, from the
-// data manager and from unprivileged users:
-//
-// 1. Valid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
-//
-// 2. Invalid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
-//
-// 3. Valid trash list from the data manager
-// (expected result: 200 OK with request body "Received 3 trash
-// requests"
-//
-// 4. Invalid trash list from the data manager
-// (expected result: 400 Bad Request)
-//
-// Test that in the end, the trash collector received a good list
-// trash list with the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously. Make sure that none of them return 400
-// Bad Request and that replica.Dump() returns a valid list.
-//
-func (s *HandlerSuite) TestTrashHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- // Replace the router's trashq -- which the worker goroutines
- // started by setup() are now receiving from -- with a new
- // one, so we can see what the handler sends to it.
- trashq := NewWorkQueue()
- s.handler.Handler.(*router).trashq = trashq
-
- var userToken = "USER TOKEN"
- s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
- goodJSON := []byte(`[
- {
- "locator":"block1",
- "block_mtime":1409082153
- },
- {
- "locator":"block2",
- "block_mtime":1409082153
- },
- {
- "locator":"block3",
- "block_mtime":1409082153
- }
- ]`)
-
- badJSON := []byte(`I am not a valid JSON string`)
-
- type trashTest struct {
- name string
- req RequestTester
- responseCode int
- responseBody string
- }
-
- var testcases = []trashTest{
- {
- "Valid trash list from an ordinary user",
- RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
- http.StatusUnauthorized,
- "Unauthorized\n",
- },
- {
- "Invalid trash list from an ordinary user",
- RequestTester{"/trash", userToken, "PUT", badJSON, ""},
- http.StatusUnauthorized,
- "Unauthorized\n",
- },
- {
- "Valid trash list from the data manager",
- RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
- http.StatusOK,
- "Received 3 trash requests\n",
- },
- {
- "Invalid trash list from the data manager",
- RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
- http.StatusBadRequest,
- "",
- },
- }
-
- for _, tst := range testcases {
- response := IssueRequest(s.handler, &tst.req)
- ExpectStatusCode(c, tst.name, tst.responseCode, response)
- ExpectBody(c, tst.name, tst.responseBody, response)
- }
-
- // The trash collector should have received one good list with 3
- // requests on it.
- for i := 0; i < 3; i++ {
- item := <-trashq.NextItem
- if _, ok := item.(TrashRequest); !ok {
- c.Errorf("item %v could not be parsed as a TrashRequest", item)
- }
- }
-
- expectChannelEmpty(c, trashq.NextItem)
-}
-
-// ====================
-// Helper functions
-// ====================
-
-// IssueTestRequest executes an HTTP request described by rt, to a
-// REST router. It returns the HTTP response to the request.
-func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
- response := httptest.NewRecorder()
- body := bytes.NewReader(rt.requestBody)
- req, _ := http.NewRequest(rt.method, rt.uri, body)
- if rt.apiToken != "" {
- req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
- }
- if rt.storageClasses != "" {
- req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
- }
- handler.ServeHTTP(response, req)
- return response
-}
-
-func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
- response := httptest.NewRecorder()
- body := bytes.NewReader(rt.requestBody)
- req, _ := http.NewRequest(rt.method, rt.uri, body)
- if rt.apiToken != "" {
- req.Header.Set("Authorization", "Bearer "+rt.apiToken)
- }
- handler.ServeHTTP(response, req)
- return response
-}
-
-// ExpectStatusCode checks whether a response has the specified status code,
-// and reports a test failure if not.
-func ExpectStatusCode(
- c *check.C,
- testname string,
- expectedStatus int,
- response *httptest.ResponseRecorder) {
- c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
-}
-
-func ExpectBody(
- c *check.C,
- testname string,
- expectedBody string,
- response *httptest.ResponseRecorder) {
- if expectedBody != "" && response.Body.String() != expectedBody {
- c.Errorf("%s: expected response body '%s', got %+v",
- testname, expectedBody, response)
- }
-}
-
-// See #7121
-func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- defer func(orig *bufferPool) {
- bufs = orig
- }(bufs)
- bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-
- ok := make(chan struct{})
- go func() {
- for i := 0; i < 2; i++ {
- response := IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- })
- ExpectStatusCode(c,
- "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
- }
- ok <- struct{}{}
- }()
-
- select {
- case <-ok:
- case <-time.After(time.Second):
- c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
- }
-}
-
-// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- ok := make(chan bool)
- go func() {
- for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
- // Unauthenticated request, no server key
- // => OK (unsigned response)
- unsignedLocator := "/" + TestHash
- response := IssueRequest(s.handler,
- &RequestTester{
- method: "PUT",
- uri: unsignedLocator,
- requestBody: TestBlock,
- })
- ExpectStatusCode(c,
- "TestPutHandlerBufferleak", http.StatusOK, response)
- ExpectBody(c,
- "TestPutHandlerBufferleak",
- TestHashPutResp, response)
- }
- ok <- true
- }()
- select {
- case <-time.After(20 * time.Second):
- // If the buffer pool leaks, the test goroutine hangs.
- c.Fatal("test did not finish, assuming pool leaked")
- case <-ok:
- }
-}
-
-func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
- s.cluster.Collections.BlobSigning = false
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- defer func(orig *bufferPool) {
- bufs = orig
- }(bufs)
- bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
- defer bufs.Put(bufs.Get(BlockSize))
-
- err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
- c.Assert(err, check.IsNil)
-
- resp := httptest.NewRecorder()
- ok := make(chan struct{})
- go func() {
- ctx, cancel := context.WithCancel(context.Background())
- req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
- cancel()
- s.handler.ServeHTTP(resp, req)
- ok <- struct{}{}
- }()
-
- select {
- case <-time.After(20 * time.Second):
- c.Fatal("request took >20s, close notifier must be broken")
- case <-ok:
- }
-
- ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
- for i, v := range s.handler.volmgr.AllWritable() {
- if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
- c.Errorf("volume %d got %d calls, expected 0", i, calls)
- }
- }
-}
-
-// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- vols := s.handler.volmgr.AllWritable()
- if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
- c.Error(err)
- }
-
- ok := make(chan bool)
- go func() {
- for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
- // Unauthenticated request, unsigned locator
- // => OK
- unsignedLocator := "/" + TestHash
- response := IssueRequest(s.handler,
- &RequestTester{
- method: "GET",
- uri: unsignedLocator,
- })
- ExpectStatusCode(c,
- "Unauthenticated request, unsigned locator", http.StatusOK, response)
- ExpectBody(c,
- "Unauthenticated request, unsigned locator",
- string(TestBlock),
- response)
- }
- ok <- true
- }()
- select {
- case <-time.After(20 * time.Second):
- // If the buffer pool leaks, the test goroutine hangs.
- c.Fatal("test did not finish, assuming pool leaked")
- case <-ok:
- }
-}
-
-func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
- "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
- "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
- }
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- rt := RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- }
-
- for _, trial := range []struct {
- ask string
- expect string
- }{
- {"", ""},
- {"default", "default=1"},
- {" , default , default , ", "default=1"},
- {"special", "extra=1, special=1"},
- {"special, readonly", "extra=1, special=1"},
- {"special, nonexistent", "extra=1, special=1"},
- {"extra, special", "extra=1, special=1"},
- {"default, special", "default=1, extra=1, special=1"},
- } {
- c.Logf("success case %#v", trial)
- rt.storageClasses = trial.ask
- resp := IssueRequest(s.handler, &rt)
- if trial.expect == "" {
- // any non-empty value is correct
- c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
- } else {
- c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
- }
- }
-
- for _, trial := range []struct {
- ask string
- }{
- {"doesnotexist"},
- {"doesnotexist, readonly"},
- {"readonly"},
- } {
- c.Logf("failure case %#v", trial)
- rt.storageClasses = trial.ask
- resp := IssueRequest(s.handler, &rt)
- c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
- }
-}
-
-func sortCommaSeparated(s string) string {
- slice := strings.Split(s, ", ")
- sort.Strings(slice)
- return strings.Join(slice, ", ")
-}
-
-func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- resp := IssueRequest(s.handler, &RequestTester{
- method: "PUT",
- uri: "/" + TestHash,
- requestBody: TestBlock,
- })
- c.Logf("%#v", resp)
- c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
- c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
-}
-
-func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- // Set up Keep volumes
- vols := s.handler.volmgr.AllWritable()
- vols[0].Put(context.Background(), TestHash, TestBlock)
-
- s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
- // unauthenticatedReq => UnauthorizedError
- unauthenticatedReq := &RequestTester{
- method: "PUT",
- uri: "/untrash/" + TestHash,
- }
- response := IssueRequest(s.handler, unauthenticatedReq)
- ExpectStatusCode(c,
- "Unauthenticated request",
- UnauthorizedError.HTTPCode,
- response)
-
- // notDataManagerReq => UnauthorizedError
- notDataManagerReq := &RequestTester{
- method: "PUT",
- uri: "/untrash/" + TestHash,
- apiToken: knownToken,
- }
-
- response = IssueRequest(s.handler, notDataManagerReq)
- ExpectStatusCode(c,
- "Non-datamanager token",
- UnauthorizedError.HTTPCode,
- response)
-
- // datamanagerWithBadHashReq => StatusBadRequest
- datamanagerWithBadHashReq := &RequestTester{
- method: "PUT",
- uri: "/untrash/thisisnotalocator",
- apiToken: s.cluster.SystemRootToken,
- }
- response = IssueRequest(s.handler, datamanagerWithBadHashReq)
- ExpectStatusCode(c,
- "Bad locator in untrash request",
- http.StatusBadRequest,
- response)
-
- // datamanagerWrongMethodReq => StatusBadRequest
- datamanagerWrongMethodReq := &RequestTester{
- method: "GET",
- uri: "/untrash/" + TestHash,
- apiToken: s.cluster.SystemRootToken,
- }
- response = IssueRequest(s.handler, datamanagerWrongMethodReq)
- ExpectStatusCode(c,
- "Only PUT method is supported for untrash",
- http.StatusMethodNotAllowed,
- response)
-
- // datamanagerReq => StatusOK
- datamanagerReq := &RequestTester{
- method: "PUT",
- uri: "/untrash/" + TestHash,
- apiToken: s.cluster.SystemRootToken,
- }
- response = IssueRequest(s.handler, datamanagerReq)
- ExpectStatusCode(c,
- "",
- http.StatusOK,
- response)
- c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
-}
-
-func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
- // Change all volumes to read-only
- for uuid, v := range s.cluster.Volumes {
- v.ReadOnly = true
- s.cluster.Volumes[uuid] = v
- }
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- // datamanagerReq => StatusOK
- datamanagerReq := &RequestTester{
- method: "PUT",
- uri: "/untrash/" + TestHash,
- apiToken: s.cluster.SystemRootToken,
- }
- response := IssueRequest(s.handler, datamanagerReq)
- ExpectStatusCode(c,
- "No writable volumes",
- http.StatusNotFound,
- response)
-}
-
-func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
- s.cluster.ManagementToken = arvadostest.ManagementToken
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- pingReq := &RequestTester{
- method: "GET",
- uri: "/_health/ping",
- apiToken: arvadostest.ManagementToken,
- }
- response := IssueHealthCheckRequest(s.handler, pingReq)
- ExpectStatusCode(c,
- "",
- http.StatusOK,
- response)
- want := `{"health":"OK"}`
- if !strings.Contains(response.Body.String(), want) {
- c.Errorf("expected response to include %s: got %s", want, response.Body.String())
- }
-}
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
deleted file mode 100644
index 63a23687ec..0000000000
--- a/services/keepstore/handlers.go
+++ /dev/null
@@ -1,1050 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "container/list"
- "context"
- "crypto/md5"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "os"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/health"
- "git.arvados.org/arvados.git/sdk/go/httpserver"
- "github.com/gorilla/mux"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
-)
-
-type router struct {
- *mux.Router
- cluster *arvados.Cluster
- logger logrus.FieldLogger
- remoteProxy remoteProxy
- metrics *nodeMetrics
- volmgr *RRVolumeManager
- pullq *WorkQueue
- trashq *WorkQueue
-}
-
-// MakeRESTRouter returns a new router that forwards all Keep requests
-// to the appropriate handlers.
-func MakeRESTRouter(ctx context.Context, cluster *arvados.Cluster, reg *prometheus.Registry, volmgr *RRVolumeManager, pullq, trashq *WorkQueue) http.Handler {
- rtr := &router{
- Router: mux.NewRouter(),
- cluster: cluster,
- logger: ctxlog.FromContext(ctx),
- metrics: &nodeMetrics{reg: reg},
- volmgr: volmgr,
- pullq: pullq,
- trashq: trashq,
- }
-
- rtr.HandleFunc(
- `/{hash:[0-9a-f]{32}}`, rtr.handleGET).Methods("GET", "HEAD")
- rtr.HandleFunc(
- `/{hash:[0-9a-f]{32}}+{hints}`,
- rtr.handleGET).Methods("GET", "HEAD")
-
- rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handlePUT).Methods("PUT")
- rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handleDELETE).Methods("DELETE")
- // List all blocks stored here. Privileged client only.
- rtr.HandleFunc(`/index`, rtr.handleIndex).Methods("GET", "HEAD")
- // List blocks stored here whose hash has the given prefix.
- // Privileged client only.
- rtr.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, rtr.handleIndex).Methods("GET", "HEAD")
- // Update timestamp on existing block. Privileged client only.
- rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handleTOUCH).Methods("TOUCH")
-
- // Internals/debugging info (runtime.MemStats)
- rtr.HandleFunc(`/debug.json`, rtr.DebugHandler).Methods("GET", "HEAD")
-
- // List volumes: path, device number, bytes used/avail.
- rtr.HandleFunc(`/status.json`, rtr.StatusHandler).Methods("GET", "HEAD")
-
- // List mounts: UUID, readonly, tier, device ID, ...
- rtr.HandleFunc(`/mounts`, rtr.MountsHandler).Methods("GET")
- rtr.HandleFunc(`/mounts/{uuid}/blocks`, rtr.handleIndex).Methods("GET")
- rtr.HandleFunc(`/mounts/{uuid}/blocks/`, rtr.handleIndex).Methods("GET")
-
- // Replace the current pull queue.
- rtr.HandleFunc(`/pull`, rtr.handlePull).Methods("PUT")
-
- // Replace the current trash queue.
- rtr.HandleFunc(`/trash`, rtr.handleTrash).Methods("PUT")
-
- // Untrash moves blocks from trash back into store
- rtr.HandleFunc(`/untrash/{hash:[0-9a-f]{32}}`, rtr.handleUntrash).Methods("PUT")
-
- rtr.Handle("/_health/{check}", &health.Handler{
- Token: cluster.ManagementToken,
- Prefix: "/_health/",
- }).Methods("GET")
-
- // Any request which does not match any of these routes gets
- // 400 Bad Request.
- rtr.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
-
- rtr.metrics.setupBufferPoolMetrics(bufs)
- rtr.metrics.setupWorkQueueMetrics(rtr.pullq, "pull")
- rtr.metrics.setupWorkQueueMetrics(rtr.trashq, "trash")
-
- return rtr
-}
-
-// BadRequestHandler is a HandleFunc to address bad requests.
-func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
- http.Error(w, BadRequestError.Error(), BadRequestError.HTTPCode)
-}
-
-func (rtr *router) handleGET(resp http.ResponseWriter, req *http.Request) {
- locator := req.URL.Path[1:]
- if strings.Contains(locator, "+R") && !strings.Contains(locator, "+A") {
- rtr.remoteProxy.Get(req.Context(), resp, req, rtr.cluster, rtr.volmgr)
- return
- }
-
- if rtr.cluster.Collections.BlobSigning {
- locator := req.URL.Path[1:] // strip leading slash
- if err := VerifySignature(rtr.cluster, locator, GetAPIToken(req)); err != nil {
- http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
- return
- }
- }
-
- // TODO: Probe volumes to check whether the block _might_
- // exist. Some volumes/types could support a quick existence
- // check without causing other operations to suffer. If all
- // volumes support that, and assure us the block definitely
- // isn't here, we can return 404 now instead of waiting for a
- // buffer.
-
- buf, err := getBufferWithContext(req.Context(), bufs, BlockSize)
- if err != nil {
- http.Error(resp, err.Error(), http.StatusServiceUnavailable)
- return
- }
- defer bufs.Put(buf)
-
- size, err := GetBlock(req.Context(), rtr.volmgr, mux.Vars(req)["hash"], buf, resp)
- if err != nil {
- code := http.StatusInternalServerError
- if err, ok := err.(*KeepError); ok {
- code = err.HTTPCode
- }
- http.Error(resp, err.Error(), code)
- return
- }
-
- resp.Header().Set("Content-Length", strconv.Itoa(size))
- resp.Header().Set("Content-Type", "application/octet-stream")
- resp.Write(buf[:size])
-}
-
-// Get a buffer from the pool -- but give up and return a non-nil
-// error if ctx ends before we get a buffer.
-func getBufferWithContext(ctx context.Context, bufs *bufferPool, bufSize int) ([]byte, error) {
- bufReady := make(chan []byte)
- go func() {
- bufReady <- bufs.Get(bufSize)
- }()
- select {
- case buf := <-bufReady:
- return buf, nil
- case <-ctx.Done():
- go func() {
- // Even if closeNotifier happened first, we
- // need to keep waiting for our buf so we can
- // return it to the pool.
- bufs.Put(<-bufReady)
- }()
- return nil, ErrClientDisconnect
- }
-}
-
-func (rtr *router) handleTOUCH(resp http.ResponseWriter, req *http.Request) {
- if !rtr.isSystemAuth(GetAPIToken(req)) {
- http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
- return
- }
- hash := mux.Vars(req)["hash"]
- vols := rtr.volmgr.AllWritable()
- if len(vols) == 0 {
- http.Error(resp, "no volumes", http.StatusNotFound)
- return
- }
- var err error
- for _, mnt := range vols {
- err = mnt.Touch(hash)
- if err == nil {
- break
- }
- }
- switch {
- case err == nil:
- return
- case os.IsNotExist(err):
- http.Error(resp, err.Error(), http.StatusNotFound)
- default:
- http.Error(resp, err.Error(), http.StatusInternalServerError)
- }
-}
-
-func (rtr *router) handlePUT(resp http.ResponseWriter, req *http.Request) {
- hash := mux.Vars(req)["hash"]
-
- // Detect as many error conditions as possible before reading
- // the body: avoid transmitting data that will not end up
- // being written anyway.
-
- if req.ContentLength == -1 {
- http.Error(resp, SizeRequiredError.Error(), SizeRequiredError.HTTPCode)
- return
- }
-
- if req.ContentLength > BlockSize {
- http.Error(resp, TooLongError.Error(), TooLongError.HTTPCode)
- return
- }
-
- if len(rtr.volmgr.AllWritable()) == 0 {
- http.Error(resp, FullError.Error(), FullError.HTTPCode)
- return
- }
-
- var wantStorageClasses []string
- if hdr := req.Header.Get("X-Keep-Storage-Classes"); hdr != "" {
- wantStorageClasses = strings.Split(hdr, ",")
- for i, sc := range wantStorageClasses {
- wantStorageClasses[i] = strings.TrimSpace(sc)
- }
- } else {
- // none specified -- use configured default
- for class, cfg := range rtr.cluster.StorageClasses {
- if cfg.Default {
- wantStorageClasses = append(wantStorageClasses, class)
- }
- }
- }
-
- buf, err := getBufferWithContext(req.Context(), bufs, int(req.ContentLength))
- if err != nil {
- http.Error(resp, err.Error(), http.StatusServiceUnavailable)
- return
- }
-
- _, err = io.ReadFull(req.Body, buf)
- if err != nil {
- http.Error(resp, err.Error(), 500)
- bufs.Put(buf)
- return
- }
-
- result, err := PutBlock(req.Context(), rtr.volmgr, buf, hash, wantStorageClasses)
- bufs.Put(buf)
-
- if err != nil {
- code := http.StatusInternalServerError
- if err, ok := err.(*KeepError); ok {
- code = err.HTTPCode
- }
- http.Error(resp, err.Error(), code)
- return
- }
-
- // Success; add a size hint, sign the locator if possible, and
- // return it to the client.
- returnHash := fmt.Sprintf("%s+%d", hash, req.ContentLength)
- apiToken := GetAPIToken(req)
- if rtr.cluster.Collections.BlobSigningKey != "" && apiToken != "" {
- expiry := time.Now().Add(rtr.cluster.Collections.BlobSigningTTL.Duration())
- returnHash = SignLocator(rtr.cluster, returnHash, apiToken, expiry)
- }
- resp.Header().Set("X-Keep-Replicas-Stored", result.TotalReplication())
- resp.Header().Set("X-Keep-Storage-Classes-Confirmed", result.ClassReplication())
- resp.Write([]byte(returnHash + "\n"))
-}
-
-// IndexHandler responds to "/index", "/index/{prefix}", and
-// "/mounts/{uuid}/blocks" requests.
-func (rtr *router) handleIndex(resp http.ResponseWriter, req *http.Request) {
- if !rtr.isSystemAuth(GetAPIToken(req)) {
- http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
- return
- }
-
- prefix := mux.Vars(req)["prefix"]
- if prefix == "" {
- req.ParseForm()
- prefix = req.Form.Get("prefix")
- }
-
- uuid := mux.Vars(req)["uuid"]
-
- var vols []*VolumeMount
- if uuid == "" {
- vols = rtr.volmgr.AllReadable()
- } else if mnt := rtr.volmgr.Lookup(uuid, false); mnt == nil {
- http.Error(resp, "mount not found", http.StatusNotFound)
- return
- } else {
- vols = []*VolumeMount{mnt}
- }
-
- for _, v := range vols {
- if err := v.IndexTo(prefix, resp); err != nil {
- // We can't send an error status/message to
- // the client because IndexTo() might have
- // already written body content. All we can do
- // is log the error in our own logs.
- //
- // The client must notice the lack of trailing
- // newline as an indication that the response
- // is incomplete.
- ctxlog.FromContext(req.Context()).WithError(err).Errorf("truncating index response after error from volume %s", v)
- return
- }
- }
- // An empty line at EOF is the only way the client can be
- // assured the entire index was received.
- resp.Write([]byte{'\n'})
-}
-
-// MountsHandler responds to "GET /mounts" requests.
-func (rtr *router) MountsHandler(resp http.ResponseWriter, req *http.Request) {
- err := json.NewEncoder(resp).Encode(rtr.volmgr.Mounts())
- if err != nil {
- httpserver.Error(resp, err.Error(), http.StatusInternalServerError)
- }
-}
-
-// PoolStatus struct
-type PoolStatus struct {
- Alloc uint64 `json:"BytesAllocatedCumulative"`
- Cap int `json:"BuffersMax"`
- Len int `json:"BuffersInUse"`
-}
-
-type volumeStatusEnt struct {
- Label string
- Status *VolumeStatus `json:",omitempty"`
- VolumeStats *ioStats `json:",omitempty"`
- InternalStats interface{} `json:",omitempty"`
-}
-
-// NodeStatus struct
-type NodeStatus struct {
- Volumes []*volumeStatusEnt
- BufferPool PoolStatus
- PullQueue WorkQueueStatus
- TrashQueue WorkQueueStatus
- RequestsCurrent int
- RequestsMax int
- Version string
-}
-
-var st NodeStatus
-var stLock sync.Mutex
-
-// DebugHandler addresses /debug.json requests.
-func (rtr *router) DebugHandler(resp http.ResponseWriter, req *http.Request) {
- type debugStats struct {
- MemStats runtime.MemStats
- }
- var ds debugStats
- runtime.ReadMemStats(&ds.MemStats)
- data, err := json.Marshal(&ds)
- if err != nil {
- http.Error(resp, err.Error(), http.StatusInternalServerError)
- return
- }
- resp.Write(data)
-}
-
-// StatusHandler addresses /status.json requests.
-func (rtr *router) StatusHandler(resp http.ResponseWriter, req *http.Request) {
- stLock.Lock()
- rtr.readNodeStatus(&st)
- data, err := json.Marshal(&st)
- stLock.Unlock()
- if err != nil {
- http.Error(resp, err.Error(), http.StatusInternalServerError)
- return
- }
- resp.Write(data)
-}
-
-// populate the given NodeStatus struct with current values.
-func (rtr *router) readNodeStatus(st *NodeStatus) {
- st.Version = strings.SplitN(cmd.Version.String(), " ", 2)[0]
- vols := rtr.volmgr.AllReadable()
- if cap(st.Volumes) < len(vols) {
- st.Volumes = make([]*volumeStatusEnt, len(vols))
- }
- st.Volumes = st.Volumes[:0]
- for _, vol := range vols {
- var internalStats interface{}
- if vol, ok := vol.Volume.(InternalStatser); ok {
- internalStats = vol.InternalStats()
- }
- st.Volumes = append(st.Volumes, &volumeStatusEnt{
- Label: vol.String(),
- Status: vol.Status(),
- InternalStats: internalStats,
- //VolumeStats: rtr.volmgr.VolumeStats(vol),
- })
- }
- st.BufferPool.Alloc = bufs.Alloc()
- st.BufferPool.Cap = bufs.Cap()
- st.BufferPool.Len = bufs.Len()
- st.PullQueue = getWorkQueueStatus(rtr.pullq)
- st.TrashQueue = getWorkQueueStatus(rtr.trashq)
-}
-
-// return a WorkQueueStatus for the given queue. If q is nil (which
-// should never happen except in test suites), return a zero status
-// value instead of crashing.
-func getWorkQueueStatus(q *WorkQueue) WorkQueueStatus {
- if q == nil {
- // This should only happen during tests.
- return WorkQueueStatus{}
- }
- return q.Status()
-}
-
-// handleDELETE processes DELETE requests.
-//
-// DELETE /{hash:[0-9a-f]{32} will delete the block with the specified hash
-// from all connected volumes.
-//
-// Only the Data Manager, or an Arvados admin with scope "all", are
-// allowed to issue DELETE requests. If a DELETE request is not
-// authenticated or is issued by a non-admin user, the server returns
-// a PermissionError.
-//
-// Upon receiving a valid request from an authorized user,
-// handleDELETE deletes all copies of the specified block on local
-// writable volumes.
-//
-// Response format:
-//
-// If the requested blocks was not found on any volume, the response
-// code is HTTP 404 Not Found.
-//
-// Otherwise, the response code is 200 OK, with a response body
-// consisting of the JSON message
-//
-// {"copies_deleted":d,"copies_failed":f}
-//
-// where d and f are integers representing the number of blocks that
-// were successfully and unsuccessfully deleted.
-//
-func (rtr *router) handleDELETE(resp http.ResponseWriter, req *http.Request) {
- hash := mux.Vars(req)["hash"]
-
- // Confirm that this user is an admin and has a token with unlimited scope.
- var tok = GetAPIToken(req)
- if tok == "" || !rtr.canDelete(tok) {
- http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
- return
- }
-
- if !rtr.cluster.Collections.BlobTrash {
- http.Error(resp, MethodDisabledError.Error(), MethodDisabledError.HTTPCode)
- return
- }
-
- // Delete copies of this block from all available volumes.
- // Report how many blocks were successfully deleted, and how
- // many were found on writable volumes but not deleted.
- var result struct {
- Deleted int `json:"copies_deleted"`
- Failed int `json:"copies_failed"`
- }
- for _, vol := range rtr.volmgr.AllWritable() {
- if err := vol.Trash(hash); err == nil {
- result.Deleted++
- } else if os.IsNotExist(err) {
- continue
- } else {
- result.Failed++
- ctxlog.FromContext(req.Context()).WithError(err).Errorf("Trash(%s) failed on volume %s", hash, vol)
- }
- }
- if result.Deleted == 0 && result.Failed == 0 {
- resp.WriteHeader(http.StatusNotFound)
- return
- }
- body, err := json.Marshal(result)
- if err != nil {
- http.Error(resp, err.Error(), http.StatusInternalServerError)
- return
- }
- resp.Write(body)
-}
-
-/* PullHandler processes "PUT /pull" requests for the data manager.
- The request body is a JSON message containing a list of pull
- requests in the following format:
-
- [
- {
- "locator":"e4d909c290d0fb1ca068ffaddf22cbd0+4985",
- "servers":[
- "keep0.qr1hi.arvadosapi.com:25107",
- "keep1.qr1hi.arvadosapi.com:25108"
- ]
- },
- {
- "locator":"55ae4d45d2db0793d53f03e805f656e5+658395",
- "servers":[
- "10.0.1.5:25107",
- "10.0.1.6:25107",
- "10.0.1.7:25108"
- ]
- },
- ...
- ]
-
- Each pull request in the list consists of a block locator string
- and an ordered list of servers. Keepstore should try to fetch the
- block from each server in turn.
-
- If the request has not been sent by the Data Manager, return 401
- Unauthorized.
-
- If the JSON unmarshalling fails, return 400 Bad Request.
-*/
-
-// PullRequest consists of a block locator and an ordered list of servers
-type PullRequest struct {
- Locator string `json:"locator"`
- Servers []string `json:"servers"`
-
- // Destination mount, or "" for "anywhere"
- MountUUID string `json:"mount_uuid"`
-}
-
-// PullHandler processes "PUT /pull" requests for the data manager.
-func (rtr *router) handlePull(resp http.ResponseWriter, req *http.Request) {
- // Reject unauthorized requests.
- if !rtr.isSystemAuth(GetAPIToken(req)) {
- http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
- return
- }
-
- // Parse the request body.
- var pr []PullRequest
- r := json.NewDecoder(req.Body)
- if err := r.Decode(&pr); err != nil {
- http.Error(resp, err.Error(), BadRequestError.HTTPCode)
- return
- }
-
- // We have a properly formatted pull list sent from the data
- // manager. Report success and send the list to the pull list
- // manager for further handling.
- resp.WriteHeader(http.StatusOK)
- resp.Write([]byte(
- fmt.Sprintf("Received %d pull requests\n", len(pr))))
-
- plist := list.New()
- for _, p := range pr {
- plist.PushBack(p)
- }
- rtr.pullq.ReplaceQueue(plist)
-}
-
-// TrashRequest consists of a block locator and its Mtime
-type TrashRequest struct {
- Locator string `json:"locator"`
- BlockMtime int64 `json:"block_mtime"`
-
- // Target mount, or "" for "everywhere"
- MountUUID string `json:"mount_uuid"`
-}
-
-// TrashHandler processes /trash requests.
-func (rtr *router) handleTrash(resp http.ResponseWriter, req *http.Request) {
- // Reject unauthorized requests.
- if !rtr.isSystemAuth(GetAPIToken(req)) {
- http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
- return
- }
-
- // Parse the request body.
- var trash []TrashRequest
- r := json.NewDecoder(req.Body)
- if err := r.Decode(&trash); err != nil {
- http.Error(resp, err.Error(), BadRequestError.HTTPCode)
- return
- }
-
- // We have a properly formatted trash list sent from the data
- // manager. Report success and send the list to the trash work
- // queue for further handling.
- resp.WriteHeader(http.StatusOK)
- resp.Write([]byte(
- fmt.Sprintf("Received %d trash requests\n", len(trash))))
-
- tlist := list.New()
- for _, t := range trash {
- tlist.PushBack(t)
- }
- rtr.trashq.ReplaceQueue(tlist)
-}
-
-// UntrashHandler processes "PUT /untrash/{hash:[0-9a-f]{32}}" requests for the data manager.
-func (rtr *router) handleUntrash(resp http.ResponseWriter, req *http.Request) {
- // Reject unauthorized requests.
- if !rtr.isSystemAuth(GetAPIToken(req)) {
- http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
- return
- }
-
- log := ctxlog.FromContext(req.Context())
- hash := mux.Vars(req)["hash"]
-
- if len(rtr.volmgr.AllWritable()) == 0 {
- http.Error(resp, "No writable volumes", http.StatusNotFound)
- return
- }
-
- var untrashedOn, failedOn []string
- var numNotFound int
- for _, vol := range rtr.volmgr.AllWritable() {
- err := vol.Untrash(hash)
-
- if os.IsNotExist(err) {
- numNotFound++
- } else if err != nil {
- log.WithError(err).Errorf("Error untrashing %v on volume %s", hash, vol)
- failedOn = append(failedOn, vol.String())
- } else {
- log.Infof("Untrashed %v on volume %v", hash, vol.String())
- untrashedOn = append(untrashedOn, vol.String())
- }
- }
-
- if numNotFound == len(rtr.volmgr.AllWritable()) {
- http.Error(resp, "Block not found on any of the writable volumes", http.StatusNotFound)
- } else if len(failedOn) == len(rtr.volmgr.AllWritable()) {
- http.Error(resp, "Failed to untrash on all writable volumes", http.StatusInternalServerError)
- } else {
- respBody := "Successfully untrashed on: " + strings.Join(untrashedOn, ", ")
- if len(failedOn) > 0 {
- respBody += "; Failed to untrash on: " + strings.Join(failedOn, ", ")
- http.Error(resp, respBody, http.StatusInternalServerError)
- } else {
- fmt.Fprintln(resp, respBody)
- }
- }
-}
-
-// GetBlock and PutBlock implement lower-level code for handling
-// blocks by rooting through volumes connected to the local machine.
-// Once the handler has determined that system policy permits the
-// request, it calls these methods to perform the actual operation.
-//
-// TODO(twp): this code would probably be better located in the
-// VolumeManager interface. As an abstraction, the VolumeManager
-// should be the only part of the code that cares about which volume a
-// block is stored on, so it should be responsible for figuring out
-// which volume to check for fetching blocks, storing blocks, etc.
-
-// GetBlock fetches the block identified by "hash" into the provided
-// buf, and returns the data size.
-//
-// If the block cannot be found on any volume, returns NotFoundError.
-//
-// If the block found does not have the correct MD5 hash, returns
-// DiskHashError.
-//
-func GetBlock(ctx context.Context, volmgr *RRVolumeManager, hash string, buf []byte, resp http.ResponseWriter) (int, error) {
- log := ctxlog.FromContext(ctx)
-
- // Attempt to read the requested hash from a keep volume.
- errorToCaller := NotFoundError
-
- for _, vol := range volmgr.AllReadable() {
- size, err := vol.Get(ctx, hash, buf)
- select {
- case <-ctx.Done():
- return 0, ErrClientDisconnect
- default:
- }
- if err != nil {
- // IsNotExist is an expected error and may be
- // ignored. All other errors are logged. In
- // any case we continue trying to read other
- // volumes. If all volumes report IsNotExist,
- // we return a NotFoundError.
- if !os.IsNotExist(err) {
- log.WithError(err).Errorf("Get(%s) failed on %s", hash, vol)
- }
- // If some volume returns a transient error, return it to the caller
- // instead of "Not found" so it can retry.
- if err == VolumeBusyError {
- errorToCaller = err.(*KeepError)
- }
- continue
- }
- // Check the file checksum.
- filehash := fmt.Sprintf("%x", md5.Sum(buf[:size]))
- if filehash != hash {
- // TODO: Try harder to tell a sysadmin about
- // this.
- log.Errorf("checksum mismatch for block %s (actual %s), size %d on %s", hash, filehash, size, vol)
- errorToCaller = DiskHashError
- continue
- }
- if errorToCaller == DiskHashError {
- log.Warn("after checksum mismatch for block %s on a different volume, a good copy was found on volume %s and returned", hash, vol)
- }
- return size, nil
- }
- return 0, errorToCaller
-}
-
-type putProgress struct {
- classNeeded map[string]bool
- classTodo map[string]bool
- mountUsed map[*VolumeMount]bool
- totalReplication int
- classDone map[string]int
-}
-
-// Number of distinct replicas stored. "2" can mean the block was
-// stored on 2 different volumes with replication 1, or on 1 volume
-// with replication 2.
-func (pr putProgress) TotalReplication() string {
- return strconv.Itoa(pr.totalReplication)
-}
-
-// Number of replicas satisfying each storage class, formatted like
-// "default=2; special=1".
-func (pr putProgress) ClassReplication() string {
- s := ""
- for k, v := range pr.classDone {
- if len(s) > 0 {
- s += ", "
- }
- s += k + "=" + strconv.Itoa(v)
- }
- return s
-}
-
-func (pr *putProgress) Add(mnt *VolumeMount) {
- if pr.mountUsed[mnt] {
- logrus.Warnf("BUG? superfluous extra write to mount %s", mnt.UUID)
- return
- }
- pr.mountUsed[mnt] = true
- pr.totalReplication += mnt.Replication
- for class := range mnt.StorageClasses {
- pr.classDone[class] += mnt.Replication
- delete(pr.classTodo, class)
- }
-}
-
-func (pr *putProgress) Sub(mnt *VolumeMount) {
- if !pr.mountUsed[mnt] {
- logrus.Warnf("BUG? Sub called with no prior matching Add: %s", mnt.UUID)
- return
- }
- pr.mountUsed[mnt] = false
- pr.totalReplication -= mnt.Replication
- for class := range mnt.StorageClasses {
- pr.classDone[class] -= mnt.Replication
- if pr.classNeeded[class] {
- pr.classTodo[class] = true
- }
- }
-}
-
-func (pr *putProgress) Done() bool {
- return len(pr.classTodo) == 0 && pr.totalReplication > 0
-}
-
-func (pr *putProgress) Want(mnt *VolumeMount) bool {
- if pr.Done() || pr.mountUsed[mnt] {
- return false
- }
- if len(pr.classTodo) == 0 {
- // none specified == "any"
- return true
- }
- for class := range mnt.StorageClasses {
- if pr.classTodo[class] {
- return true
- }
- }
- return false
-}
-
-func (pr *putProgress) Copy() *putProgress {
- cp := putProgress{
- classNeeded: pr.classNeeded,
- classTodo: make(map[string]bool, len(pr.classTodo)),
- classDone: make(map[string]int, len(pr.classDone)),
- mountUsed: make(map[*VolumeMount]bool, len(pr.mountUsed)),
- totalReplication: pr.totalReplication,
- }
- for k, v := range pr.classTodo {
- cp.classTodo[k] = v
- }
- for k, v := range pr.classDone {
- cp.classDone[k] = v
- }
- for k, v := range pr.mountUsed {
- cp.mountUsed[k] = v
- }
- return &cp
-}
-
-func newPutProgress(classes []string) putProgress {
- pr := putProgress{
- classNeeded: make(map[string]bool, len(classes)),
- classTodo: make(map[string]bool, len(classes)),
- classDone: map[string]int{},
- mountUsed: map[*VolumeMount]bool{},
- }
- for _, c := range classes {
- if c != "" {
- pr.classNeeded[c] = true
- pr.classTodo[c] = true
- }
- }
- return pr
-}
-
-// PutBlock stores the given block on one or more volumes.
-//
-// The MD5 checksum of the block must match the given hash.
-//
-// The block is written to each writable volume (ordered by priority
-// and then UUID, see volume.go) until at least one replica has been
-// stored in each of the requested storage classes.
-//
-// The returned error, if any, is a KeepError with one of the
-// following codes:
-//
-// 500 Collision
-// A different block with the same hash already exists on this
-// Keep server.
-// 422 MD5Fail
-// The MD5 hash of the BLOCK does not match the argument HASH.
-// 503 Full
-// There was not enough space left in any Keep volume to store
-// the object.
-// 500 Fail
-// The object could not be stored for some other reason (e.g.
-// all writes failed). The text of the error message should
-// provide as much detail as possible.
-func PutBlock(ctx context.Context, volmgr *RRVolumeManager, block []byte, hash string, wantStorageClasses []string) (putProgress, error) {
- log := ctxlog.FromContext(ctx)
-
- // Check that BLOCK's checksum matches HASH.
- blockhash := fmt.Sprintf("%x", md5.Sum(block))
- if blockhash != hash {
- log.Printf("%s: MD5 checksum %s did not match request", hash, blockhash)
- return putProgress{}, RequestHashError
- }
-
- result := newPutProgress(wantStorageClasses)
-
- // If we already have this data, it's intact on disk, and we
- // can update its timestamp, return success. If we have
- // different data with the same hash, return failure.
- if err := CompareAndTouch(ctx, volmgr, hash, block, &result); err != nil || result.Done() {
- return result, err
- }
- if ctx.Err() != nil {
- return result, ErrClientDisconnect
- }
-
- writables := volmgr.NextWritable()
- if len(writables) == 0 {
- log.Error("no writable volumes")
- return result, FullError
- }
-
- var wg sync.WaitGroup
- var mtx sync.Mutex
- cond := sync.Cond{L: &mtx}
- // pending predicts what result will be if all pending writes
- // succeed.
- pending := result.Copy()
- var allFull atomic.Value
- allFull.Store(true)
-
- // We hold the lock for the duration of the "each volume" loop
- // below, except when it is released during cond.Wait().
- mtx.Lock()
-
- for _, mnt := range writables {
- // Wait until our decision to use this mount does not
- // depend on the outcome of pending writes.
- for result.Want(mnt) && !pending.Want(mnt) {
- cond.Wait()
- }
- if !result.Want(mnt) {
- continue
- }
- mnt := mnt
- pending.Add(mnt)
- wg.Add(1)
- go func() {
- log.Debugf("PutBlock: start write to %s", mnt.UUID)
- defer wg.Done()
- err := mnt.Put(ctx, hash, block)
-
- mtx.Lock()
- if err != nil {
- log.Debugf("PutBlock: write to %s failed", mnt.UUID)
- pending.Sub(mnt)
- } else {
- log.Debugf("PutBlock: write to %s succeeded", mnt.UUID)
- result.Add(mnt)
- }
- cond.Broadcast()
- mtx.Unlock()
-
- if err != nil && err != FullError && ctx.Err() == nil {
- // The volume is not full but the
- // write did not succeed. Report the
- // error and continue trying.
- allFull.Store(false)
- log.WithError(err).Errorf("%s: Put(%s) failed", mnt.Volume, hash)
- }
- }()
- }
- mtx.Unlock()
- wg.Wait()
- if ctx.Err() != nil {
- return result, ErrClientDisconnect
- }
- if result.Done() {
- return result, nil
- }
-
- if result.totalReplication > 0 {
- // Some, but not all, of the storage classes were
- // satisfied. This qualifies as success.
- return result, nil
- } else if allFull.Load().(bool) {
- log.Error("all volumes with qualifying storage classes are full")
- return putProgress{}, FullError
- } else {
- // Already logged the non-full errors.
- return putProgress{}, GenericError
- }
-}
-
-// CompareAndTouch looks for volumes where the given content already
-// exists and its modification time can be updated (i.e., it is
-// protected from garbage collection), and updates result accordingly.
-// It returns when the result is Done() or all volumes have been
-// checked.
-func CompareAndTouch(ctx context.Context, volmgr *RRVolumeManager, hash string, buf []byte, result *putProgress) error {
- log := ctxlog.FromContext(ctx)
- for _, mnt := range volmgr.AllWritable() {
- if !result.Want(mnt) {
- continue
- }
- err := mnt.Compare(ctx, hash, buf)
- if ctx.Err() != nil {
- return nil
- } else if err == CollisionError {
- // Stop if we have a block with same hash but
- // different content. (It will be impossible
- // to tell which one is wanted if we have
- // both, so there's no point writing it even
- // on a different volume.)
- log.Errorf("collision in Compare(%s) on volume %s", hash, mnt.Volume)
- return CollisionError
- } else if os.IsNotExist(err) {
- // Block does not exist. This is the only
- // "normal" error: we don't log anything.
- continue
- } else if err != nil {
- // Couldn't open file, data is corrupt on
- // disk, etc.: log this abnormal condition,
- // and try the next volume.
- log.WithError(err).Warnf("error in Compare(%s) on volume %s", hash, mnt.Volume)
- continue
- }
- if err := mnt.Touch(hash); err != nil {
- log.WithError(err).Errorf("error in Touch(%s) on volume %s", hash, mnt.Volume)
- continue
- }
- // Compare and Touch both worked --> done.
- result.Add(mnt)
- if result.Done() {
- return nil
- }
- }
- return nil
-}
-
-var validLocatorRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
-
-// IsValidLocator returns true if the specified string is a valid Keep locator.
-// When Keep is extended to support hash types other than MD5,
-// this should be updated to cover those as well.
-//
-func IsValidLocator(loc string) bool {
- return validLocatorRe.MatchString(loc)
-}
-
-var authRe = regexp.MustCompile(`^(OAuth2|Bearer)\s+(.*)`)
-
-// GetAPIToken returns the OAuth2 token from the Authorization
-// header of a HTTP request, or an empty string if no matching
-// token is found.
-func GetAPIToken(req *http.Request) string {
- if auth, ok := req.Header["Authorization"]; ok {
- if match := authRe.FindStringSubmatch(auth[0]); match != nil {
- return match[2]
- }
- }
- return ""
-}
-
-// canDelete returns true if the user identified by apiToken is
-// allowed to delete blocks.
-func (rtr *router) canDelete(apiToken string) bool {
- if apiToken == "" {
- return false
- }
- // Blocks may be deleted only when Keep has been configured with a
- // data manager.
- if rtr.isSystemAuth(apiToken) {
- return true
- }
- // TODO(twp): look up apiToken with the API server
- // return true if is_admin is true and if the token
- // has unlimited scope
- return false
-}
-
-// isSystemAuth returns true if the given token is allowed to perform
-// system level actions like deleting data.
-func (rtr *router) isSystemAuth(token string) bool {
- return token != "" && token == rtr.cluster.SystemRootToken
-}
diff --git a/services/keepstore/hashcheckwriter.go b/services/keepstore/hashcheckwriter.go
new file mode 100644
index 0000000000..f191c98e4b
--- /dev/null
+++ b/services/keepstore/hashcheckwriter.go
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "fmt"
+ "hash"
+ "io"
+)
+
+type hashCheckWriter struct {
+ writer io.Writer
+ hash hash.Hash
+ expectSize int64
+ expectDigest string
+
+ offset int64
+}
+
+// newHashCheckWriter returns a writer that writes through to w, but
+// stops short if the written content reaches expectSize bytes and
+// does not match expectDigest according to the given hash
+// function.
+//
+// It returns a write error if more than expectSize bytes are written.
+//
+// Thus, in case of a hash mismatch, fewer than expectSize will be
+// written through.
+func newHashCheckWriter(writer io.Writer, hash hash.Hash, expectSize int64, expectDigest string) io.Writer {
+ return &hashCheckWriter{
+ writer: writer,
+ hash: hash,
+ expectSize: expectSize,
+ expectDigest: expectDigest,
+ }
+}
+
+func (hcw *hashCheckWriter) Write(p []byte) (int, error) {
+ if todo := hcw.expectSize - hcw.offset - int64(len(p)); todo < 0 {
+ // Writing beyond expected size returns a checksum
+ // error without even checking the hash.
+ return 0, errChecksum
+ } else if todo > 0 {
+ // This isn't the last write, so we pass it through.
+ _, err := hcw.hash.Write(p)
+ if err != nil {
+ return 0, err
+ }
+ n, err := hcw.writer.Write(p)
+ hcw.offset += int64(n)
+ return n, err
+ } else {
+ // This is the last write, so we check the hash before
+ // writing through.
+ _, err := hcw.hash.Write(p)
+ if err != nil {
+ return 0, err
+ }
+ if digest := fmt.Sprintf("%x", hcw.hash.Sum(nil)); digest != hcw.expectDigest {
+ return 0, errChecksum
+ }
+ // Ensure subsequent write will fail
+ hcw.offset = hcw.expectSize + 1
+ return hcw.writer.Write(p)
+ }
+}
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index b9dbe2777e..60d062e1e3 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -2,56 +2,764 @@
//
// SPDX-License-Identifier: AGPL-3.0
+// Package keepstore implements the keepstore service component and
+// back-end storage drivers.
+//
+// It is an internal module, only intended to be imported by
+// /cmd/arvados-server and other server-side components in this
+// repository.
package keepstore
import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
"time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+// Maximum size of a keep block is 64 MiB.
+const BlockSize = 1 << 26
+
+var (
+ errChecksum = httpserver.ErrorWithStatus(errors.New("checksum mismatch in stored data"), http.StatusBadGateway)
+ errNoTokenProvided = httpserver.ErrorWithStatus(errors.New("no token provided in Authorization header"), http.StatusUnauthorized)
+ errMethodNotAllowed = httpserver.ErrorWithStatus(errors.New("method not allowed"), http.StatusMethodNotAllowed)
+ errVolumeUnavailable = httpserver.ErrorWithStatus(errors.New("volume unavailable"), http.StatusServiceUnavailable)
+ errCollision = httpserver.ErrorWithStatus(errors.New("hash collision"), http.StatusInternalServerError)
+ errExpiredSignature = httpserver.ErrorWithStatus(errors.New("expired signature"), http.StatusUnauthorized)
+ errInvalidSignature = httpserver.ErrorWithStatus(errors.New("invalid signature"), http.StatusBadRequest)
+ errInvalidLocator = httpserver.ErrorWithStatus(errors.New("invalid locator"), http.StatusBadRequest)
+ errFull = httpserver.ErrorWithStatus(errors.New("insufficient storage"), http.StatusInsufficientStorage)
+ errTooLarge = httpserver.ErrorWithStatus(errors.New("request entity too large"), http.StatusRequestEntityTooLarge)
+ driver = make(map[string]volumeDriver)
)
-// BlockSize for a Keep "block" is 64MB.
-const BlockSize = 64 * 1024 * 1024
+type indexOptions struct {
+ MountUUID string
+ Prefix string
+ WriteTo io.Writer
+}
+
+type mount struct {
+ arvados.KeepMount
+ volume
+ priority int
+}
+
+type keepstore struct {
+ cluster *arvados.Cluster
+ logger logrus.FieldLogger
+ serviceURL arvados.URL
+ mounts map[string]*mount
+ mountsR []*mount
+ mountsW []*mount
+ bufferPool *bufferPool
+
+ iostats map[volume]*ioStats
+
+ remoteClients map[string]*keepclient.KeepClient
+ remoteClientsMtx sync.Mutex
+}
+
+func newKeepstore(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) (*keepstore, error) {
+ logger := ctxlog.FromContext(ctx)
+
+ if cluster.API.MaxConcurrentRequests > 0 && cluster.API.MaxConcurrentRequests < cluster.API.MaxKeepBlobBuffers {
+ logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", cluster.API.MaxKeepBlobBuffers, cluster.API.MaxConcurrentRequests)
+ }
+
+ if cluster.Collections.BlobSigningKey != "" {
+ } else if cluster.Collections.BlobSigning {
+ return nil, errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
+ } else {
+ logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")
+ }
+
+ if cluster.API.MaxKeepBlobBuffers <= 0 {
+ return nil, fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
+ }
+ bufferPool := newBufferPool(logger, cluster.API.MaxKeepBlobBuffers, reg)
+
+ ks := &keepstore{
+ cluster: cluster,
+ logger: logger,
+ serviceURL: serviceURL,
+ bufferPool: bufferPool,
+ remoteClients: make(map[string]*keepclient.KeepClient),
+ }
+
+ err := ks.setupMounts(newVolumeMetricsVecs(reg))
+ if err != nil {
+ return nil, err
+ }
+
+ return ks, nil
+}
+
+func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
+ ks.mounts = make(map[string]*mount)
+ if len(ks.cluster.Volumes) == 0 {
+ return errors.New("no volumes configured")
+ }
+ for uuid, cfgvol := range ks.cluster.Volumes {
+ va, ok := cfgvol.AccessViaHosts[ks.serviceURL]
+ if !ok && len(cfgvol.AccessViaHosts) > 0 {
+ continue
+ }
+ dri, ok := driver[cfgvol.Driver]
+ if !ok {
+ return fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
+ }
+ vol, err := dri(newVolumeParams{
+ UUID: uuid,
+ Cluster: ks.cluster,
+ ConfigVolume: cfgvol,
+ Logger: ks.logger,
+ MetricsVecs: metrics,
+ BufferPool: ks.bufferPool,
+ })
+ if err != nil {
+ return fmt.Errorf("error initializing volume %s: %s", uuid, err)
+ }
+ sc := cfgvol.StorageClasses
+ if len(sc) == 0 {
+ sc = map[string]bool{"default": true}
+ }
+ repl := cfgvol.Replication
+ if repl < 1 {
+ repl = 1
+ }
+ pri := 0
+ for class, in := range cfgvol.StorageClasses {
+ p := ks.cluster.StorageClasses[class].Priority
+ if in && p > pri {
+ pri = p
+ }
+ }
+ mnt := &mount{
+ volume: vol,
+ priority: pri,
+ KeepMount: arvados.KeepMount{
+ UUID: uuid,
+ DeviceID: vol.DeviceID(),
+ AllowWrite: !va.ReadOnly && !cfgvol.ReadOnly,
+ AllowTrash: !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
+ Replication: repl,
+ StorageClasses: sc,
+ },
+ }
+ ks.mounts[uuid] = mnt
+ ks.logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol.DeviceID(), mnt.AllowWrite, mnt.AllowTrash)
+ }
+ if len(ks.mounts) == 0 {
+ return fmt.Errorf("no volumes configured for %s", ks.serviceURL)
+ }
+
+ ks.mountsR = nil
+ ks.mountsW = nil
+ for _, mnt := range ks.mounts {
+ ks.mountsR = append(ks.mountsR, mnt)
+ if mnt.AllowWrite {
+ ks.mountsW = append(ks.mountsW, mnt)
+ }
+ }
+ // Sorting mounts by UUID makes behavior more predictable, and
+ // is convenient for testing -- for example, "index all
+ // volumes" and "trash block on all volumes" will visit
+ // volumes in predictable order.
+ sort.Slice(ks.mountsR, func(i, j int) bool { return ks.mountsR[i].UUID < ks.mountsR[j].UUID })
+ sort.Slice(ks.mountsW, func(i, j int) bool { return ks.mountsW[i].UUID < ks.mountsW[j].UUID })
+ return nil
+}
+
+// checkLocatorSignature checks that locator has a valid signature.
+// If the BlobSigning config is false, it returns nil even if the
+// signature is invalid or missing.
+func (ks *keepstore) checkLocatorSignature(ctx context.Context, locator string) error {
+ if !ks.cluster.Collections.BlobSigning {
+ return nil
+ }
+ token := ctxToken(ctx)
+ if token == "" {
+ return errNoTokenProvided
+ }
+ err := arvados.VerifySignature(locator, token, ks.cluster.Collections.BlobSigningTTL.Duration(), []byte(ks.cluster.Collections.BlobSigningKey))
+ if err == arvados.ErrSignatureExpired {
+ return errExpiredSignature
+ } else if err != nil {
+ return errInvalidSignature
+ }
+ return nil
+}
-// MinFreeKilobytes is the amount of space a Keep volume must have available
-// in order to permit writes.
-const MinFreeKilobytes = BlockSize / 1024
+// signLocator signs the locator for the given token, if possible.
+// Note this signs if the BlobSigningKey config is available, even if
+// the BlobSigning config is false.
+func (ks *keepstore) signLocator(token, locator string) string {
+ if token == "" || len(ks.cluster.Collections.BlobSigningKey) == 0 {
+ return locator
+ }
+ ttl := ks.cluster.Collections.BlobSigningTTL.Duration()
+ return arvados.SignLocator(locator, token, time.Now().Add(ttl), ttl, []byte(ks.cluster.Collections.BlobSigningKey))
+}
+
+func (ks *keepstore) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (n int, err error) {
+ li, err := getLocatorInfo(opts.Locator)
+ if err != nil {
+ return 0, err
+ }
+ out := opts.WriteTo
+ if rw, ok := out.(http.ResponseWriter); ok && li.size > 0 {
+ out = &setSizeOnWrite{ResponseWriter: rw, size: li.size}
+ }
+ if li.remote && !li.signed {
+ return ks.blockReadRemote(ctx, opts)
+ }
+ if err := ks.checkLocatorSignature(ctx, opts.Locator); err != nil {
+ return 0, err
+ }
+ hashcheck := md5.New()
+ if li.size > 0 {
+ out = newHashCheckWriter(out, hashcheck, int64(li.size), li.hash)
+ } else {
+ out = io.MultiWriter(out, hashcheck)
+ }
+
+ buf, err := ks.bufferPool.GetContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer ks.bufferPool.Put(buf)
+ streamer := newStreamWriterAt(out, 65536, buf)
+ defer streamer.Close()
-var bufs *bufferPool
+ var errToCaller error = os.ErrNotExist
+ for _, mnt := range ks.rendezvous(li.hash, ks.mountsR) {
+ if ctx.Err() != nil {
+ return 0, ctx.Err()
+ }
+ err := mnt.BlockRead(ctx, li.hash, streamer)
+ if err != nil {
+ if streamer.WroteAt() != 0 {
+ // BlockRead encountered an error
+ // after writing some data, so it's
+ // too late to try another
+ // volume. Flush streamer before
+ // calling Wrote() to ensure our
+ // return value accurately reflects
+ // the number of bytes written to
+ // opts.WriteTo.
+ streamer.Close()
+ return streamer.Wrote(), err
+ }
+ if !os.IsNotExist(err) {
+ errToCaller = err
+ }
+ continue
+ }
+ if li.size == 0 {
+ // hashCheckingWriter isn't in use because we
+ // don't know the expected size. All we can do
+ // is check after writing all the data, and
+ // trust the caller is doing a HEAD request so
+ // it's not too late to set an error code in
+ // the response header.
+ err = streamer.Close()
+ if hash := fmt.Sprintf("%x", hashcheck.Sum(nil)); hash != li.hash && err == nil {
+ err = errChecksum
+ }
+ if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
+ // We didn't set the content-length header
+ // above because we didn't know the block size
+ // until now.
+ rw.Header().Set("Content-Length", fmt.Sprintf("%d", streamer.WroteAt()))
+ }
+ return streamer.WroteAt(), err
+ } else if streamer.WroteAt() != li.size {
+ // If the backend read fewer bytes than
+ // expected but returns no error, we can
+ // classify this as a checksum error (even
+ // though hashCheckWriter doesn't know that
+ // yet, it's just waiting for the next
+ // write). If our caller is serving a GET
+ // request it's too late to do anything about
+ // it anyway, but if it's a HEAD request the
+ // caller can still change the response status
+ // code.
+ return streamer.WroteAt(), errChecksum
+ }
+ // Ensure streamer flushes all buffered data without
+ // errors.
+ err = streamer.Close()
+ return streamer.Wrote(), err
+ }
+ return 0, errToCaller
+}
+
+func (ks *keepstore) blockReadRemote(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
+ token := ctxToken(ctx)
+ if token == "" {
+ return 0, errNoTokenProvided
+ }
+ var remoteClient *keepclient.KeepClient
+ var parts []string
+ li, err := getLocatorInfo(opts.Locator)
+ if err != nil {
+ return 0, err
+ }
+ for i, part := range strings.Split(opts.Locator, "+") {
+ switch {
+ case i == 0:
+ // don't try to parse hash part as hint
+ case strings.HasPrefix(part, "A"):
+ // drop local permission hint
+ continue
+ case len(part) > 7 && part[0] == 'R' && part[6] == '-':
+ remoteID := part[1:6]
+ remote, ok := ks.cluster.RemoteClusters[remoteID]
+ if !ok {
+ return 0, httpserver.ErrorWithStatus(errors.New("remote cluster not configured"), http.StatusBadRequest)
+ }
+ kc, err := ks.remoteClient(remoteID, remote, token)
+ if err == auth.ErrObsoleteToken {
+ return 0, httpserver.ErrorWithStatus(err, http.StatusBadRequest)
+ } else if err != nil {
+ return 0, err
+ }
+ remoteClient = kc
+ part = "A" + part[7:]
+ }
+ parts = append(parts, part)
+ }
+ if remoteClient == nil {
+ return 0, httpserver.ErrorWithStatus(errors.New("invalid remote hint"), http.StatusBadRequest)
+ }
+ locator := strings.Join(parts, "+")
+ if opts.LocalLocator == nil {
+ // Read from remote cluster and stream response back
+ // to caller
+ if rw, ok := opts.WriteTo.(http.ResponseWriter); ok && li.size > 0 {
+ rw.Header().Set("Content-Length", fmt.Sprintf("%d", li.size))
+ }
+ return remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: locator,
+ WriteTo: opts.WriteTo,
+ })
+ }
+ // We must call LocalLocator before writing any data to
+ // opts.WriteTo, otherwise the caller can't put the local
+ // locator in a response header. So we copy into memory,
+ // generate the local signature, then copy from memory to
+ // opts.WriteTo.
+ buf, err := ks.bufferPool.GetContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer ks.bufferPool.Put(buf)
+ writebuf := bytes.NewBuffer(buf[:0])
+ ks.logger.Infof("blockReadRemote(%s): remote read(%s)", opts.Locator, locator)
+ _, err = remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: locator,
+ WriteTo: writebuf,
+ })
+ if err != nil {
+ return 0, err
+ }
+ resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: locator,
+ Data: writebuf.Bytes(),
+ })
+ if err != nil {
+ return 0, err
+ }
+ opts.LocalLocator(resp.Locator)
+ if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
+ rw.Header().Set("Content-Length", fmt.Sprintf("%d", writebuf.Len()))
+ }
+ n, err := io.Copy(opts.WriteTo, bytes.NewReader(writebuf.Bytes()))
+ return int(n), err
+}
-// KeepError types.
+func (ks *keepstore) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
+ ks.remoteClientsMtx.Lock()
+ kc, ok := ks.remoteClients[remoteID]
+ ks.remoteClientsMtx.Unlock()
+ if !ok {
+ c := &arvados.Client{
+ APIHost: remoteCluster.Host,
+ AuthToken: "xxx",
+ Insecure: remoteCluster.Insecure,
+ }
+ ac, err := arvadosclient.New(c)
+ if err != nil {
+ return nil, err
+ }
+ kc, err = keepclient.MakeKeepClient(ac)
+ if err != nil {
+ return nil, err
+ }
+ kc.DiskCacheSize = keepclient.DiskCacheDisabled
+
+ ks.remoteClientsMtx.Lock()
+ ks.remoteClients[remoteID] = kc
+ ks.remoteClientsMtx.Unlock()
+ }
+ accopy := *kc.Arvados
+ accopy.ApiToken = token
+ kccopy := kc.Clone()
+ kccopy.Arvados = &accopy
+ token, err := auth.SaltToken(token, remoteID)
+ if err != nil {
+ return nil, err
+ }
+ kccopy.Arvados.ApiToken = token
+ return kccopy, nil
+}
+
+// BlockWrite writes a block to one or more volumes.
+func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+ var resp arvados.BlockWriteResponse
+ var hash string
+ if opts.Data == nil {
+ buf, err := ks.bufferPool.GetContext(ctx)
+ if err != nil {
+ return resp, err
+ }
+ defer ks.bufferPool.Put(buf)
+ w := bytes.NewBuffer(buf[:0])
+ h := md5.New()
+ limitedReader := &io.LimitedReader{R: opts.Reader, N: BlockSize}
+ n, err := io.Copy(io.MultiWriter(w, h), limitedReader)
+ if err != nil {
+ return resp, err
+ }
+ if limitedReader.N == 0 {
+ // Data size is either exactly BlockSize, or too big.
+ n, err := opts.Reader.Read(make([]byte, 1))
+ if n > 0 {
+ return resp, httpserver.ErrorWithStatus(err, http.StatusRequestEntityTooLarge)
+ }
+ if err != io.EOF {
+ return resp, err
+ }
+ }
+ opts.Data = buf[:n]
+ if opts.DataSize != 0 && int(n) != opts.DataSize {
+ return resp, httpserver.ErrorWithStatus(fmt.Errorf("content length %d did not match specified data size %d", n, opts.DataSize), http.StatusBadRequest)
+ }
+ hash = fmt.Sprintf("%x", h.Sum(nil))
+ } else {
+ hash = fmt.Sprintf("%x", md5.Sum(opts.Data))
+ }
+ if opts.Hash != "" && !strings.HasPrefix(opts.Hash, hash) {
+ return resp, httpserver.ErrorWithStatus(fmt.Errorf("content hash %s did not match specified locator %s", hash, opts.Hash), http.StatusBadRequest)
+ }
+ rvzmounts := ks.rendezvous(hash, ks.mountsW)
+ result := newPutProgress(opts.StorageClasses)
+ for _, mnt := range rvzmounts {
+ if !result.Want(mnt) {
+ continue
+ }
+ cmp := &checkEqual{Expect: opts.Data}
+ if err := mnt.BlockRead(ctx, hash, cmp); err == nil {
+ if !cmp.Equal() {
+ return resp, errCollision
+ }
+ err := mnt.BlockTouch(hash)
+ if err == nil {
+ result.Add(mnt)
+ }
+ }
+ }
+ var allFull atomic.Bool
+ allFull.Store(true)
+ // pending tracks what result will be if all outstanding
+ // writes succeed.
+ pending := result.Copy()
+ cond := sync.NewCond(new(sync.Mutex))
+ cond.L.Lock()
+ var wg sync.WaitGroup
+nextmnt:
+ for _, mnt := range rvzmounts {
+ for {
+ if result.Done() || ctx.Err() != nil {
+ break nextmnt
+ }
+ if !result.Want(mnt) {
+ continue nextmnt
+ }
+ if pending.Want(mnt) {
+ break
+ }
+ // This mount might not be needed, depending
+ // on the outcome of pending writes. Wait for
+ // a pending write to finish, then check
+ // again.
+ cond.Wait()
+ }
+ mnt := mnt
+ logger := ks.logger.WithField("mount", mnt.UUID)
+ pending.Add(mnt)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ logger.Debug("start write")
+ err := mnt.BlockWrite(ctx, hash, opts.Data)
+ cond.L.Lock()
+ defer cond.L.Unlock()
+ defer cond.Broadcast()
+ if err != nil {
+ logger.Debug("write failed")
+ pending.Sub(mnt)
+ if err != errFull {
+ allFull.Store(false)
+ }
+ } else {
+ result.Add(mnt)
+ pending.Sub(mnt)
+ }
+ }()
+ }
+ cond.L.Unlock()
+ wg.Wait()
+ if ctx.Err() != nil {
+ return resp, ctx.Err()
+ }
+ if result.Done() || result.totalReplication > 0 {
+ resp = arvados.BlockWriteResponse{
+ Locator: ks.signLocator(ctxToken(ctx), fmt.Sprintf("%s+%d", hash, len(opts.Data))),
+ Replicas: result.totalReplication,
+ StorageClasses: result.classDone,
+ }
+ return resp, nil
+ }
+ if allFull.Load() {
+ return resp, errFull
+ }
+ return resp, errVolumeUnavailable
+}
+
+// rendezvous sorts the given mounts by descending priority, then by
+// rendezvous order for the given locator.
+func (*keepstore) rendezvous(locator string, mnts []*mount) []*mount {
+ hash := locator
+ if len(hash) > 32 {
+ hash = hash[:32]
+ }
+ // copy the provided []*mount before doing an in-place sort
+ mnts = append([]*mount(nil), mnts...)
+ weight := make(map[*mount]string)
+ for _, mnt := range mnts {
+ uuidpart := mnt.UUID
+ if len(uuidpart) == 27 {
+ // strip zzzzz-yyyyy- prefixes
+ uuidpart = uuidpart[12:]
+ }
+ weight[mnt] = fmt.Sprintf("%x", md5.Sum([]byte(hash+uuidpart)))
+ }
+ sort.Slice(mnts, func(i, j int) bool {
+ if p := mnts[i].priority - mnts[j].priority; p != 0 {
+ return p > 0
+ }
+ return weight[mnts[i]] < weight[mnts[j]]
+ })
+ return mnts
+}
+
+// checkEqual reports whether the data written to it (via io.WriterAt
+// interface) is equal to the expected data.
//
-type KeepError struct {
- HTTPCode int
- ErrMsg string
+// Expect should not be changed after the first Write.
+//
+// Results are undefined if WriteAt is called with overlapping ranges.
+type checkEqual struct {
+ Expect []byte
+ equal atomic.Int64
+ notequal atomic.Bool
}
-var (
- BadRequestError = &KeepError{400, "Bad Request"}
- UnauthorizedError = &KeepError{401, "Unauthorized"}
- CollisionError = &KeepError{500, "Collision"}
- RequestHashError = &KeepError{422, "Hash mismatch in request"}
- PermissionError = &KeepError{403, "Forbidden"}
- DiskHashError = &KeepError{500, "Hash mismatch in stored data"}
- ExpiredError = &KeepError{401, "Expired permission signature"}
- NotFoundError = &KeepError{404, "Not Found"}
- VolumeBusyError = &KeepError{503, "Volume backend busy"}
- GenericError = &KeepError{500, "Fail"}
- FullError = &KeepError{503, "Full"}
- SizeRequiredError = &KeepError{411, "Missing Content-Length"}
- TooLongError = &KeepError{413, "Block is too large"}
- MethodDisabledError = &KeepError{405, "Method disabled"}
- ErrNotImplemented = &KeepError{500, "Unsupported configuration"}
- ErrClientDisconnect = &KeepError{503, "Client disconnected"}
-)
+func (ce *checkEqual) Equal() bool {
+ return !ce.notequal.Load() && ce.equal.Load() == int64(len(ce.Expect))
+}
-func (e *KeepError) Error() string {
- return e.ErrMsg
+func (ce *checkEqual) WriteAt(p []byte, offset int64) (int, error) {
+ endpos := int(offset) + len(p)
+ if offset >= 0 && endpos <= len(ce.Expect) && bytes.Equal(p, ce.Expect[int(offset):endpos]) {
+ ce.equal.Add(int64(len(p)))
+ } else {
+ ce.notequal.Store(true)
+ }
+ return len(p), nil
}
-// Periodically (once per interval) invoke EmptyTrash on all volumes.
-func emptyTrash(mounts []*VolumeMount, interval time.Duration) {
- for range time.NewTicker(interval).C {
- for _, v := range mounts {
- v.EmptyTrash()
+func (ks *keepstore) BlockUntrash(ctx context.Context, locator string) error {
+ li, err := getLocatorInfo(locator)
+ if err != nil {
+ return err
+ }
+ var errToCaller error = os.ErrNotExist
+ for _, mnt := range ks.mountsW {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ err := mnt.BlockUntrash(li.hash)
+ if err == nil {
+ errToCaller = nil
+ } else if !os.IsNotExist(err) && errToCaller != nil {
+ errToCaller = err
+ }
+ }
+ return errToCaller
+}
+
+func (ks *keepstore) BlockTouch(ctx context.Context, locator string) error {
+ li, err := getLocatorInfo(locator)
+ if err != nil {
+ return err
+ }
+ var errToCaller error = os.ErrNotExist
+ for _, mnt := range ks.mountsW {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ err := mnt.BlockTouch(li.hash)
+ if err == nil {
+ return nil
+ }
+ if !os.IsNotExist(err) {
+ errToCaller = err
+ }
+ }
+ return errToCaller
+}
+
+func (ks *keepstore) BlockTrash(ctx context.Context, locator string) error {
+ if !ks.cluster.Collections.BlobTrash {
+ return errMethodNotAllowed
+ }
+ li, err := getLocatorInfo(locator)
+ if err != nil {
+ return err
+ }
+ var errToCaller error = os.ErrNotExist
+ for _, mnt := range ks.mounts {
+ if !mnt.AllowTrash {
+ continue
+ }
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ t, err := mnt.Mtime(li.hash)
+ if err == nil && time.Now().Sub(t) > ks.cluster.Collections.BlobSigningTTL.Duration() {
+ err = mnt.BlockTrash(li.hash)
+ }
+ if os.IsNotExist(errToCaller) || (errToCaller == nil && !os.IsNotExist(err)) {
+ errToCaller = err
+ }
+ }
+ return errToCaller
+}
+
+func (ks *keepstore) Mounts() []*mount {
+ return ks.mountsR
+}
+
+func (ks *keepstore) Index(ctx context.Context, opts indexOptions) error {
+ mounts := ks.mountsR
+ if opts.MountUUID != "" {
+ mnt, ok := ks.mounts[opts.MountUUID]
+ if !ok {
+ return os.ErrNotExist
+ }
+ mounts = []*mount{mnt}
+ }
+ for _, mnt := range mounts {
+ err := mnt.Index(ctx, opts.Prefix, opts.WriteTo)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ctxToken(ctx context.Context) string {
+ if c, ok := auth.FromContext(ctx); ok && len(c.Tokens) > 0 {
+ return c.Tokens[0]
+ } else {
+ return ""
+ }
+}
+
+// locatorInfo expresses the attributes of a locator that are relevant
+// for keepstore decision-making.
+type locatorInfo struct {
+ hash string
+ size int
+ remote bool // locator has a +R hint
+ signed bool // locator has a +A hint
+}
+
+func getLocatorInfo(loc string) (locatorInfo, error) {
+ var li locatorInfo
+ plus := 0 // number of '+' chars seen so far
+ partlen := 0 // chars since last '+'
+ for i, c := range loc + "+" {
+ if c == '+' {
+ if partlen == 0 {
+ // double/leading/trailing '+'
+ return li, errInvalidLocator
+ }
+ if plus == 0 {
+ if i != 32 {
+ return li, errInvalidLocator
+ }
+ li.hash = loc[:i]
+ }
+ if plus == 1 {
+ if size, err := strconv.Atoi(loc[i-partlen : i]); err == nil {
+ li.size = size
+ }
+ }
+ plus++
+ partlen = 0
+ continue
+ }
+ partlen++
+ if partlen == 1 {
+ if c == 'A' {
+ li.signed = true
+ }
+ if c == 'R' {
+ li.remote = true
+ }
+ if plus > 1 && c >= '0' && c <= '9' {
+ // size, if present at all, must come first
+ return li, errInvalidLocator
+ }
+ }
+ if plus == 0 && !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) {
+ // non-hexadecimal char in hash part
+ return li, errInvalidLocator
}
}
+ return li, nil
}
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
new file mode 100644
index 0000000000..f9d9888f98
--- /dev/null
+++ b/services/keepstore/keepstore_test.go
@@ -0,0 +1,892 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
+ . "gopkg.in/check.v1"
+)
+
+func TestGocheck(t *testing.T) {
+ TestingT(t)
+}
+
+const (
+ fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+ barHash = "37b51d194a7513e45b56f6524f2d51f2"
+)
+
+var testServiceURL = func() arvados.URL {
+ return arvados.URL{Host: "localhost:12345", Scheme: "http"}
+}()
+
+func authContext(token string) context.Context {
+ return auth.NewContext(context.TODO(), &auth.Credentials{Tokens: []string{token}})
+}
+
+func testCluster(t TB) *arvados.Cluster {
+ cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cluster.SystemRootToken = arvadostest.SystemRootToken
+ cluster.ManagementToken = arvadostest.ManagementToken
+ return cluster
+}
+
+func testKeepstore(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*keepstore, context.CancelFunc) {
+ if reg == nil {
+ reg = prometheus.NewRegistry()
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ ctx = ctxlog.Context(ctx, ctxlog.TestLogger(t))
+ ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return ks, cancel
+}
+
+var _ = Suite(&keepstoreSuite{})
+
+type keepstoreSuite struct {
+ cluster *arvados.Cluster
+}
+
+func (s *keepstoreSuite) SetUpTest(c *C) {
+ s.cluster = testCluster(c)
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+ }
+}
+
+func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ ctx := authContext(arvadostest.ActiveTokenV2)
+
+ fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ err := ks.mountsW[0].BlockWrite(ctx, fooHash, []byte("bar"))
+ c.Assert(err, IsNil)
+
+ _, err = ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo"),
+ })
+ c.Check(err, ErrorMatches, "hash collision")
+
+ buf := bytes.NewBuffer(nil)
+ _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+ WriteTo: buf,
+ })
+ c.Check(err, ErrorMatches, "checksum mismatch in stored data")
+ c.Check(buf.String(), Not(Equals), "foo")
+ c.Check(buf.Len() < 3, Equals, true)
+
+ err = ks.mountsW[1].BlockWrite(ctx, fooHash, []byte("foo"))
+ c.Assert(err, IsNil)
+
+ buf = bytes.NewBuffer(nil)
+ _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+ WriteTo: buf,
+ })
+ c.Check(err, ErrorMatches, "checksum mismatch in stored data")
+ c.Check(buf.Len() < 3, Equals, true)
+}
+
+func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
+ origKey := s.cluster.Collections.BlobSigningKey
+ s.cluster.Collections.BlobSigning = false
+ s.cluster.Collections.BlobSigningKey = ""
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ resp, err := ks.BlockWrite(authContext("abcde"), arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo"),
+ })
+ c.Assert(err, IsNil)
+ c.Check(resp.Locator, Equals, fooHash+"+3")
+ locUnsigned := resp.Locator
+ ttl := time.Hour
+ locSigned := arvados.SignLocator(locUnsigned, arvadostest.ActiveTokenV2, time.Now().Add(ttl), ttl, []byte(origKey))
+ c.Assert(locSigned, Not(Equals), locUnsigned)
+
+ for _, locator := range []string{locUnsigned, locSigned} {
+ for _, token := range []string{"", "xyzzy", arvadostest.ActiveTokenV2} {
+ c.Logf("=== locator %q token %q", locator, token)
+ ctx := authContext(token)
+ buf := bytes.NewBuffer(nil)
+ _, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: locator,
+ WriteTo: buf,
+ })
+ c.Check(err, IsNil)
+ c.Check(buf.String(), Equals, "foo")
+ }
+ }
+}
+
+func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-111111111111111": {
+ Driver: "stub",
+ Replication: 1,
+ StorageClasses: map[string]bool{"class1": true}},
+ "zzzzz-nyw5e-222222222222222": {
+ Driver: "stub",
+ Replication: 1,
+ StorageClasses: map[string]bool{"class2": true, "class3": true}},
+ }
+
+ // "foobar" is just some data that happens to result in
+ // rendezvous order {111, 222}
+ data := []byte("foobar")
+ hash := fmt.Sprintf("%x", md5.Sum(data))
+
+ for _, trial := range []struct {
+ priority1 int // priority of class1, thus vol1
+ priority2 int // priority of class2
+ priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
+ expectLog string
+ }{
+ {100, 50, 50, "111 read 385\n"}, // class1 has higher priority => try vol1 first, no need to try vol2
+ {100, 100, 100, "111 read 385\n"}, // same priority, vol2 is first in rendezvous order => try vol1 first and succeed
+ {66, 99, 33, "222 read 385\n111 read 385\n"}, // class2 has higher priority => try vol2 first, then try vol1
+ {66, 33, 99, "222 read 385\n111 read 385\n"}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+ } {
+ c.Logf("=== %+v", trial)
+
+ s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+ "class1": {Priority: trial.priority1},
+ "class2": {Priority: trial.priority2},
+ "class3": {Priority: trial.priority3},
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ ctx := authContext(arvadostest.ActiveTokenV2)
+ resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: hash,
+ Data: data,
+ StorageClasses: []string{"class1"},
+ })
+ c.Assert(err, IsNil)
+
+ // Combine logs into one. (We only want the logs from
+ // the BlockRead below, not from BlockWrite above.)
+ stubLog := &stubLog{}
+ for _, mnt := range ks.mounts {
+ mnt.volume.(*stubVolume).stubLog = stubLog
+ }
+
+ n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: resp.Locator,
+ WriteTo: io.Discard,
+ })
+ c.Assert(n, Equals, len(data))
+ c.Assert(err, IsNil)
+ c.Check(stubLog.String(), Equals, trial.expectLog)
+ }
+}
+
+func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
+ for uuid, v := range s.cluster.Volumes {
+ v.ReadOnly = true
+ s.cluster.Volumes[uuid] = v
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+ for _, mnt := range ks.mounts {
+ mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error {
+ c.Error("volume BlockWrite called")
+ return errors.New("fail")
+ }
+ }
+ ctx := authContext(arvadostest.ActiveTokenV2)
+
+ _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo")})
+ c.Check(err, NotNil)
+ c.Check(err.(interface{ HTTPStatus() int }).HTTPStatus(), Equals, http.StatusInsufficientStorage)
+}
+
+func (s *keepstoreSuite) TestBlockWrite_MultipleStorageClasses(c *C) {
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-111111111111111": {
+ Driver: "stub",
+ Replication: 1,
+ StorageClasses: map[string]bool{"class1": true}},
+ "zzzzz-nyw5e-121212121212121": {
+ Driver: "stub",
+ Replication: 1,
+ StorageClasses: map[string]bool{"class1": true, "class2": true}},
+ "zzzzz-nyw5e-222222222222222": {
+ Driver: "stub",
+ Replication: 1,
+ StorageClasses: map[string]bool{"class2": true}},
+ }
+
+ // testData is a block that happens to have rendezvous order 111, 121, 222
+ testData := []byte("qux")
+ testHash := fmt.Sprintf("%x+%d", md5.Sum(testData), len(testData))
+
+ s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+ "class1": {},
+ "class2": {},
+ "class3": {},
+ }
+
+ ctx := authContext(arvadostest.ActiveTokenV2)
+ for idx, trial := range []struct {
+ classes string // desired classes
+ expectLog string
+ }{
+ {"class1", "" +
+ "111 read d85\n" +
+ "121 read d85\n" +
+ "111 write d85\n" +
+ "111 read d85\n" +
+ "111 touch d85\n"},
+ {"class2", "" +
+ "121 read d85\n" + // write#1
+ "222 read d85\n" +
+ "121 write d85\n" +
+ "121 read d85\n" + // write#2
+ "121 touch d85\n"},
+ {"class1,class2", "" +
+ "111 read d85\n" + // write#1
+ "121 read d85\n" +
+ "222 read d85\n" +
+ "121 write d85\n" +
+ "111 write d85\n" +
+ "111 read d85\n" + // write#2
+ "111 touch d85\n" +
+ "121 read d85\n" +
+ "121 touch d85\n"},
+ {"class1,class2,class404", "" +
+ "111 read d85\n" + // write#1
+ "121 read d85\n" +
+ "222 read d85\n" +
+ "121 write d85\n" +
+ "111 write d85\n" +
+ "111 read d85\n" + // write#2
+ "111 touch d85\n" +
+ "121 read d85\n" +
+ "121 touch d85\n"},
+ } {
+ c.Logf("=== %d: %+v", idx, trial)
+
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+ stubLog := &stubLog{}
+ for _, mnt := range ks.mounts {
+ mnt.volume.(*stubVolume).stubLog = stubLog
+ }
+
+ // Check that we chose the right block data
+ rvz := ks.rendezvous(testHash, ks.mountsW)
+ c.Assert(rvz[0].UUID[24:], Equals, "111")
+ c.Assert(rvz[1].UUID[24:], Equals, "121")
+ c.Assert(rvz[2].UUID[24:], Equals, "222")
+
+ for i := 0; i < 2; i++ {
+ _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: testHash,
+ Data: testData,
+ StorageClasses: strings.Split(trial.classes, ","),
+ })
+ c.Check(err, IsNil)
+ }
+ c.Check(stubLog.String(), Equals, trial.expectLog)
+ }
+}
+
+func (s *keepstoreSuite) TestBlockTrash(c *C) {
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+ "zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ var vol []*stubVolume
+ for _, mount := range ks.mountsR {
+ vol = append(vol, mount.volume.(*stubVolume))
+ }
+ sort.Slice(vol, func(i, j int) bool {
+ return vol[i].params.UUID < vol[j].params.UUID
+ })
+
+ ctx := context.Background()
+ loc := fooHash + "+3"
+ tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+ clear := func() {
+ for _, vol := range vol {
+ err := vol.BlockTrash(fooHash)
+ if !os.IsNotExist(err) {
+ c.Assert(err, IsNil)
+ }
+ }
+ }
+ writeit := func(volidx int) {
+ err := vol[volidx].BlockWrite(ctx, fooHash, []byte("foo"))
+ c.Assert(err, IsNil)
+ err = vol[volidx].blockTouchWithTime(fooHash, tOld)
+ c.Assert(err, IsNil)
+ }
+ trashit := func() error {
+ return ks.BlockTrash(ctx, loc)
+ }
+ checkexists := func(volidx int) bool {
+ err := vol[volidx].BlockRead(ctx, fooHash, brdiscard)
+ if !os.IsNotExist(err) {
+ c.Check(err, IsNil)
+ }
+ return err == nil
+ }
+
+ clear()
+ c.Check(trashit(), Equals, os.ErrNotExist)
+
+ // one old replica => trash it
+ clear()
+ writeit(0)
+ c.Check(trashit(), IsNil)
+ c.Check(checkexists(0), Equals, false)
+
+ // one old replica + one new replica => keep new, trash old
+ clear()
+ writeit(0)
+ writeit(1)
+ c.Check(vol[1].blockTouchWithTime(fooHash, time.Now()), IsNil)
+ c.Check(trashit(), IsNil)
+ c.Check(checkexists(0), Equals, false)
+ c.Check(checkexists(1), Equals, true)
+
+ // two old replicas => trash both
+ clear()
+ writeit(0)
+ writeit(1)
+ c.Check(trashit(), IsNil)
+ c.Check(checkexists(0), Equals, false)
+ c.Check(checkexists(1), Equals, false)
+
+ // four old replicas => trash all except readonly volume with
+ // AllowTrashWhenReadOnly==false
+ clear()
+ writeit(0)
+ writeit(1)
+ writeit(2)
+ writeit(3)
+ c.Check(trashit(), IsNil)
+ c.Check(checkexists(0), Equals, false)
+ c.Check(checkexists(1), Equals, false)
+ c.Check(checkexists(2), Equals, true)
+ c.Check(checkexists(3), Equals, false)
+
+ // two old replicas but one returns an error => return the
+ // only non-404 backend error
+ clear()
+ vol[0].blockTrash = func(hash string) error {
+ return errors.New("fake error")
+ }
+ writeit(0)
+ writeit(3)
+ c.Check(trashit(), ErrorMatches, "fake error")
+ c.Check(checkexists(0), Equals, true)
+ c.Check(checkexists(1), Equals, false)
+ c.Check(checkexists(2), Equals, false)
+ c.Check(checkexists(3), Equals, false)
+}
+
+func (s *keepstoreSuite) TestBlockWrite_OnlyOneBuffer(c *C) {
+ s.cluster.API.MaxKeepBlobBuffers = 1
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+ ok := make(chan struct{})
+ go func() {
+ defer close(ok)
+ ctx := authContext(arvadostest.ActiveTokenV2)
+ _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo")})
+ c.Check(err, IsNil)
+ }()
+ select {
+ case <-ok:
+ case <-time.After(time.Second):
+ c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
+ }
+}
+
+func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
+ s.cluster.API.MaxKeepBlobBuffers = 4
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ ctx := authContext(arvadostest.ActiveTokenV2)
+ var wg sync.WaitGroup
+ for range make([]int, 20) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo")})
+ c.Check(err, IsNil)
+ _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: resp.Locator,
+ WriteTo: io.Discard})
+ c.Check(err, IsNil)
+ }()
+ }
+ ok := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(ok)
+ }()
+ select {
+ case <-ok:
+ case <-time.After(time.Second):
+ c.Fatal("read/write sequence deadlocks, likely buffer pool leak")
+ }
+}
+
+func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"}, // "default" is implicit
+ "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"special": true, "extra": true}},
+ "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+ ctx := authContext(arvadostest.ActiveTokenV2)
+
+ for _, trial := range []struct {
+ ask []string
+ expectReplicas int
+ expectClasses map[string]int
+ }{
+ {nil,
+ 1,
+ map[string]int{"default": 1}},
+ {[]string{},
+ 1,
+ map[string]int{"default": 1}},
+ {[]string{"default"},
+ 1,
+ map[string]int{"default": 1}},
+ {[]string{"default", "default"},
+ 1,
+ map[string]int{"default": 1}},
+ {[]string{"special"},
+ 1,
+ map[string]int{"extra": 1, "special": 1}},
+ {[]string{"special", "readonly"},
+ 1,
+ map[string]int{"extra": 1, "special": 1}},
+ {[]string{"special", "nonexistent"},
+ 1,
+ map[string]int{"extra": 1, "special": 1}},
+ {[]string{"extra", "special"},
+ 1,
+ map[string]int{"extra": 1, "special": 1}},
+ {[]string{"default", "special"},
+ 2,
+ map[string]int{"default": 1, "extra": 1, "special": 1}},
+ } {
+ c.Logf("success case %#v", trial)
+ resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo"),
+ StorageClasses: trial.ask,
+ })
+ if !c.Check(err, IsNil) {
+ continue
+ }
+ c.Check(resp.Replicas, Equals, trial.expectReplicas)
+ if len(trial.expectClasses) == 0 {
+ // any non-empty value is correct
+ c.Check(resp.StorageClasses, Not(HasLen), 0)
+ } else {
+ c.Check(resp.StorageClasses, DeepEquals, trial.expectClasses)
+ }
+ }
+
+ for _, ask := range [][]string{
+ {"doesnotexist"},
+ {"doesnotexist", "readonly"},
+ {"readonly"},
+ } {
+ c.Logf("failure case %s", ask)
+ _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo"),
+ StorageClasses: ask,
+ })
+ c.Check(err, NotNil)
+ }
+}
+
+func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
+ for uuid, v := range s.cluster.Volumes {
+ v.ReadOnly = true
+ s.cluster.Volumes[uuid] = v
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+
+ for _, mnt := range ks.mounts {
+ err := mnt.BlockWrite(context.Background(), fooHash, []byte("foo"))
+ c.Assert(err, IsNil)
+ err = mnt.BlockRead(context.Background(), fooHash, brdiscard)
+ c.Assert(err, IsNil)
+ }
+
+ err := ks.BlockUntrash(context.Background(), fooHash)
+ c.Check(os.IsNotExist(err), Equals, true)
+
+ for _, mnt := range ks.mounts {
+ err := mnt.BlockRead(context.Background(), fooHash, brdiscard)
+ c.Assert(err, IsNil)
+ }
+}
+
+func (s *keepstoreSuite) TestBlockWrite_SkipReadOnly(c *C) {
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", ReadOnly: true},
+ "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+ }
+ ks, cancel := testKeepstore(c, s.cluster, nil)
+ defer cancel()
+ ctx := authContext(arvadostest.ActiveTokenV2)
+
+ for i := range make([]byte, 32) {
+ data := []byte(fmt.Sprintf("block %d", i))
+ _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{Data: data})
+ c.Assert(err, IsNil)
+ }
+ c.Check(ks.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume).stubLog.String(), Matches, "(?ms).*write.*")
+ c.Check(ks.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
+ c.Check(ks.mounts["zzzzz-nyw5e-222222222222222"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
+}
+
+func (s *keepstoreSuite) TestGetLocatorInfo(c *C) {
+ for _, trial := range []struct {
+ locator string
+ ok bool
+ expect locatorInfo
+ }{
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ ok: true},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+ ok: true, expect: locatorInfo{size: 1234}},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+Abcdef@abcdef",
+ ok: true, expect: locatorInfo{size: 1234, signed: true}},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+Rzzzzz-abcdef",
+ ok: true, expect: locatorInfo{size: 1234, remote: true}},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345+Zexample+Rzzzzz-abcdef",
+ ok: true, expect: locatorInfo{size: 12345, remote: true}},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+123456+🐶🐦+Rzzzzz-abcdef",
+ ok: true, expect: locatorInfo{size: 123456, remote: true}},
+ // invalid: bad hash char
+ {locator: "aaaaaaaaaaaaaazaaaaaaaaaaaaaaaaa+1234",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaFaaaaaaaaaaaaaaaaa+1234",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaa∵aaaaaaaaaaaaaaaaa+1234",
+ ok: false},
+ // invalid: hash length != 32
+ {locator: "",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabb",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabb+1234",
+ ok: false},
+ // invalid: first hint is not size
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+Abcdef+1234",
+ ok: false},
+ // invalid: leading/trailing/double +
+ {locator: "+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa++1234",
+ ok: false},
+ {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234++Abcdef@abcdef",
+ ok: false},
+ } {
+ c.Logf("=== %s", trial.locator)
+ li, err := getLocatorInfo(trial.locator)
+ if !trial.ok {
+ c.Check(err, NotNil)
+ continue
+ }
+ c.Check(err, IsNil)
+ c.Check(li.hash, Equals, trial.locator[:32])
+ c.Check(li.size, Equals, trial.expect.size)
+ c.Check(li.signed, Equals, trial.expect.signed)
+ c.Check(li.remote, Equals, trial.expect.remote)
+ }
+}
+
+func init() {
+ driver["stub"] = func(params newVolumeParams) (volume, error) {
+ v := &stubVolume{
+ params: params,
+ data: make(map[string]stubData),
+ stubLog: &stubLog{},
+ }
+ return v, nil
+ }
+}
+
+type stubLog struct {
+ sync.Mutex
+ bytes.Buffer
+}
+
+func (sl *stubLog) Printf(format string, args ...interface{}) {
+ if sl == nil {
+ return
+ }
+ sl.Lock()
+ defer sl.Unlock()
+ fmt.Fprintf(sl, format+"\n", args...)
+}
+
+type stubData struct {
+ mtime time.Time
+ data []byte
+ trash time.Time
+}
+
+type stubVolume struct {
+ params newVolumeParams
+ data map[string]stubData
+ stubLog *stubLog
+ mtx sync.Mutex
+
+ // The following funcs enable tests to insert delays and
+ // failures. Each volume operation begins by calling the
+ // corresponding func (if non-nil). If the func returns an
+ // error, that error is returned to caller. Otherwise, the
+ // stub continues normally.
+ blockRead func(ctx context.Context, hash string, writeTo io.WriterAt) error
+ blockWrite func(ctx context.Context, hash string, data []byte) error
+ deviceID func() string
+ blockTouch func(hash string) error
+ blockTrash func(hash string) error
+ blockUntrash func(hash string) error
+ index func(ctx context.Context, prefix string, writeTo io.Writer) error
+ mtime func(hash string) (time.Time, error)
+ emptyTrash func()
+}
+
+func (v *stubVolume) log(op, hash string) {
+ // Note this intentionally crashes if UUID or hash is short --
+ // if keepstore ever does that, tests should fail.
+ v.stubLog.Printf("%s %s %s", v.params.UUID[24:27], op, hash[:3])
+}
+
+func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.WriterAt) error {
+ v.log("read", hash)
+ if v.blockRead != nil {
+ err := v.blockRead(ctx, hash, writeTo)
+ if err != nil {
+ return err
+ }
+ }
+ v.mtx.Lock()
+ ent, ok := v.data[hash]
+ v.mtx.Unlock()
+ if !ok || !ent.trash.IsZero() {
+ return os.ErrNotExist
+ }
+ wrote := 0
+ for writesize := 1000; wrote < len(ent.data); writesize = writesize * 2 {
+ data := ent.data[wrote:]
+ if len(data) > writesize {
+ data = data[:writesize]
+ }
+ n, err := writeTo.WriteAt(data, int64(wrote))
+ wrote += n
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (v *stubVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+ v.log("write", hash)
+ if v.blockWrite != nil {
+ if err := v.blockWrite(ctx, hash, data); err != nil {
+ return err
+ }
+ }
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ v.data[hash] = stubData{
+ mtime: time.Now(),
+ data: append([]byte(nil), data...),
+ }
+ return nil
+}
+
+func (v *stubVolume) DeviceID() string {
+ return fmt.Sprintf("%p", v)
+}
+
+func (v *stubVolume) BlockTouch(hash string) error {
+ v.log("touch", hash)
+ if v.blockTouch != nil {
+ if err := v.blockTouch(hash); err != nil {
+ return err
+ }
+ }
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ ent, ok := v.data[hash]
+ if !ok || !ent.trash.IsZero() {
+ return os.ErrNotExist
+ }
+ ent.mtime = time.Now()
+ v.data[hash] = ent
+ return nil
+}
+
+// Set mtime to the (presumably old) specified time.
+func (v *stubVolume) blockTouchWithTime(hash string, t time.Time) error {
+ v.log("touchwithtime", hash)
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ ent, ok := v.data[hash]
+ if !ok {
+ return os.ErrNotExist
+ }
+ ent.mtime = t
+ v.data[hash] = ent
+ return nil
+}
+
+func (v *stubVolume) BlockTrash(hash string) error {
+ v.log("trash", hash)
+ if v.blockTrash != nil {
+ if err := v.blockTrash(hash); err != nil {
+ return err
+ }
+ }
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ ent, ok := v.data[hash]
+ if !ok || !ent.trash.IsZero() {
+ return os.ErrNotExist
+ }
+ ent.trash = time.Now().Add(v.params.Cluster.Collections.BlobTrashLifetime.Duration())
+ v.data[hash] = ent
+ return nil
+}
+
+func (v *stubVolume) BlockUntrash(hash string) error {
+ v.log("untrash", hash)
+ if v.blockUntrash != nil {
+ if err := v.blockUntrash(hash); err != nil {
+ return err
+ }
+ }
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ ent, ok := v.data[hash]
+ if !ok || ent.trash.IsZero() {
+ return os.ErrNotExist
+ }
+ ent.trash = time.Time{}
+ v.data[hash] = ent
+ return nil
+}
+
+func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer) error {
+ v.stubLog.Printf("%s index %s", v.params.UUID, prefix)
+ if v.index != nil {
+ if err := v.index(ctx, prefix, writeTo); err != nil {
+ return err
+ }
+ }
+ buf := &bytes.Buffer{}
+ v.mtx.Lock()
+ for hash, ent := range v.data {
+ if ent.trash.IsZero() && strings.HasPrefix(hash, prefix) {
+ fmt.Fprintf(buf, "%s+%d %d\n", hash, len(ent.data), ent.mtime.UnixNano())
+ }
+ }
+ v.mtx.Unlock()
+ _, err := io.Copy(writeTo, buf)
+ return err
+}
+
+func (v *stubVolume) Mtime(hash string) (time.Time, error) {
+ v.log("mtime", hash)
+ if v.mtime != nil {
+ if t, err := v.mtime(hash); err != nil {
+ return t, err
+ }
+ }
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ ent, ok := v.data[hash]
+ if !ok || !ent.trash.IsZero() {
+ return time.Time{}, os.ErrNotExist
+ }
+ return ent.mtime, nil
+}
+
+func (v *stubVolume) EmptyTrash() {
+ v.stubLog.Printf("%s emptytrash", v.params.UUID)
+ v.mtx.Lock()
+ defer v.mtx.Unlock()
+ for hash, ent := range v.data {
+ if !ent.trash.IsZero() && time.Now().After(ent.trash) {
+ delete(v.data, hash)
+ }
+ }
+}
diff --git a/services/keepstore/metrics.go b/services/keepstore/metrics.go
index d04601fbec..4638de5444 100644
--- a/services/keepstore/metrics.go
+++ b/services/keepstore/metrics.go
@@ -5,66 +5,9 @@
package keepstore
import (
- "fmt"
-
"github.com/prometheus/client_golang/prometheus"
)
-type nodeMetrics struct {
- reg *prometheus.Registry
-}
-
-func (m *nodeMetrics) setupBufferPoolMetrics(b *bufferPool) {
- m.reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepstore",
- Name: "bufferpool_allocated_bytes",
- Help: "Number of bytes allocated to buffers",
- },
- func() float64 { return float64(b.Alloc()) },
- ))
- m.reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepstore",
- Name: "bufferpool_max_buffers",
- Help: "Maximum number of buffers allowed",
- },
- func() float64 { return float64(b.Cap()) },
- ))
- m.reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepstore",
- Name: "bufferpool_inuse_buffers",
- Help: "Number of buffers in use",
- },
- func() float64 { return float64(b.Len()) },
- ))
-}
-
-func (m *nodeMetrics) setupWorkQueueMetrics(q *WorkQueue, qName string) {
- m.reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepstore",
- Name: fmt.Sprintf("%s_queue_inprogress_entries", qName),
- Help: fmt.Sprintf("Number of %s requests in progress", qName),
- },
- func() float64 { return float64(getWorkQueueStatus(q).InProgress) },
- ))
- m.reg.MustRegister(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "arvados",
- Subsystem: "keepstore",
- Name: fmt.Sprintf("%s_queue_pending_entries", qName),
- Help: fmt.Sprintf("Number of queued %s requests", qName),
- },
- func() float64 { return float64(getWorkQueueStatus(q).Queued) },
- ))
-}
-
type volumeMetricsVecs struct {
ioBytes *prometheus.CounterVec
errCounters *prometheus.CounterVec
diff --git a/services/keepstore/metrics_test.go b/services/keepstore/metrics_test.go
new file mode 100644
index 0000000000..0c8f1e68e6
--- /dev/null
+++ b/services/keepstore/metrics_test.go
@@ -0,0 +1,87 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/prometheus/client_golang/prometheus"
+ . "gopkg.in/check.v1"
+)
+
+func (s *routerSuite) TestMetrics(c *C) {
+ reg := prometheus.NewRegistry()
+ router, cancel := testRouter(c, s.cluster, reg)
+ defer cancel()
+ instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+ handler := instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
+
+ router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+ Hash: fooHash,
+ Data: []byte("foo"),
+ })
+ router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+ Hash: barHash,
+ Data: []byte("bar"),
+ })
+
+ // prime the metrics by doing a no-op request
+ resp := call(handler, "GET", "/", "", nil, nil)
+
+ resp = call(handler, "GET", "/metrics.json", "", nil, nil)
+ c.Check(resp.Code, Equals, http.StatusUnauthorized)
+ resp = call(handler, "GET", "/metrics.json", "foobar", nil, nil)
+ c.Check(resp.Code, Equals, http.StatusForbidden)
+ resp = call(handler, "GET", "/metrics.json", arvadostest.ManagementToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ var j []struct {
+ Name string
+ Help string
+ Type string
+ Metric []struct {
+ Label []struct {
+ Name string
+ Value string
+ }
+ Summary struct {
+ SampleCount string
+ SampleSum float64
+ }
+ }
+ }
+ json.NewDecoder(resp.Body).Decode(&j)
+ found := make(map[string]bool)
+ names := map[string]bool{}
+ for _, g := range j {
+ names[g.Name] = true
+ for _, m := range g.Metric {
+ if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
+ c.Check(m.Summary.SampleCount, Equals, "2")
+ found[g.Name] = true
+ }
+ }
+ }
+
+ metricsNames := []string{
+ "arvados_keepstore_bufferpool_inuse_buffers",
+ "arvados_keepstore_bufferpool_max_buffers",
+ "arvados_keepstore_bufferpool_allocated_bytes",
+ "arvados_keepstore_pull_queue_inprogress_entries",
+ "arvados_keepstore_pull_queue_pending_entries",
+ "arvados_keepstore_trash_queue_inprogress_entries",
+ "arvados_keepstore_trash_queue_pending_entries",
+ "request_duration_seconds",
+ }
+ for _, m := range metricsNames {
+ _, ok := names[m]
+ c.Check(ok, Equals, true, Commentf("checking metric %q", m))
+ }
+}
diff --git a/services/keepstore/mock_mutex_for_test.go b/services/keepstore/mock_mutex_for_test.go
deleted file mode 100644
index daf0ef05f7..0000000000
--- a/services/keepstore/mock_mutex_for_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-type MockMutex struct {
- AllowLock chan struct{}
- AllowUnlock chan struct{}
-}
-
-func NewMockMutex() *MockMutex {
- return &MockMutex{
- AllowLock: make(chan struct{}),
- AllowUnlock: make(chan struct{}),
- }
-}
-
-// Lock waits for someone to send to AllowLock.
-func (m *MockMutex) Lock() {
- <-m.AllowLock
-}
-
-// Unlock waits for someone to send to AllowUnlock.
-func (m *MockMutex) Unlock() {
- <-m.AllowUnlock
-}
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
index e8c248219f..d29d5f6dc0 100644
--- a/services/keepstore/mounts_test.go
+++ b/services/keepstore/mounts_test.go
@@ -5,28 +5,24 @@
package keepstore
import (
- "bytes"
"context"
"encoding/json"
"net/http"
- "net/http/httptest"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "git.arvados.org/arvados.git/sdk/go/httpserver"
- "github.com/prometheus/client_golang/prometheus"
- check "gopkg.in/check.v1"
+ . "gopkg.in/check.v1"
)
-func (s *HandlerSuite) TestMounts(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *routerSuite) TestMounts(c *C) {
+ router, cancel := testRouter(c, s.cluster, nil)
+ defer cancel()
- vols := s.handler.volmgr.AllWritable()
- vols[0].Put(context.Background(), TestHash, TestBlock)
- vols[1].Put(context.Background(), TestHash2, TestBlock2)
+ router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+ router.keepstore.mountsW[1].BlockWrite(context.Background(), barHash, []byte("bar"))
+
+ resp := call(router, "GET", "/mounts", s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Log(resp.Body.String())
- resp := s.call("GET", "/mounts", "", nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
var mntList []struct {
UUID string `json:"uuid"`
DeviceID string `json:"device_id"`
@@ -34,119 +30,56 @@ func (s *HandlerSuite) TestMounts(c *check.C) {
Replication int `json:"replication"`
StorageClasses map[string]bool `json:"storage_classes"`
}
- c.Log(resp.Body.String())
err := json.Unmarshal(resp.Body.Bytes(), &mntList)
- c.Assert(err, check.IsNil)
- c.Assert(len(mntList), check.Equals, 2)
+ c.Assert(err, IsNil)
+ c.Assert(mntList, HasLen, 2)
+
for _, m := range mntList {
- c.Check(len(m.UUID), check.Equals, 27)
- c.Check(m.UUID[:12], check.Equals, "zzzzz-nyw5e-")
- c.Check(m.DeviceID, check.Equals, "mock-device-id")
- c.Check(m.ReadOnly, check.Equals, false)
- c.Check(m.Replication, check.Equals, 1)
- c.Check(m.StorageClasses, check.DeepEquals, map[string]bool{"default": true})
+ c.Check(len(m.UUID), Equals, 27)
+ c.Check(m.UUID[:12], Equals, "zzzzz-nyw5e-")
+ c.Check(m.DeviceID, Matches, "0x[0-9a-f]+")
+ c.Check(m.ReadOnly, Equals, false)
+ c.Check(m.Replication, Equals, 1)
+ c.Check(m.StorageClasses, HasLen, 1)
+ for k := range m.StorageClasses {
+ c.Check(k, Matches, "testclass.*")
+ }
}
- c.Check(mntList[0].UUID, check.Not(check.Equals), mntList[1].UUID)
+ c.Check(mntList[0].UUID, Not(Equals), mntList[1].UUID)
- // Bad auth
+ c.Logf("=== bad auth")
for _, tok := range []string{"", "xyzzy"} {
- resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
- c.Check(resp.Body.String(), check.Equals, "Unauthorized\n")
- }
-
- tok := arvadostest.SystemRootToken
-
- // Nonexistent mount UUID
- resp = s.call("GET", "/mounts/X/blocks", tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusNotFound)
- c.Check(resp.Body.String(), check.Equals, "mount not found\n")
-
- // Complete index of first mount
- resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks", tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
-
- // Partial index of first mount (one block matches prefix)
- resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+TestHash[:2], tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
-
- // Complete index of second mount (note trailing slash)
- resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/", tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(resp.Body.String(), check.Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
-
- // Partial index of second mount (no blocks match prefix)
- resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+TestHash[:2], tok, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- c.Check(resp.Body.String(), check.Equals, "\n")
-}
-
-func (s *HandlerSuite) TestMetrics(c *check.C) {
- reg := prometheus.NewRegistry()
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", reg, testServiceURL), check.IsNil)
- instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), s.handler.Handler)
- s.handler.Handler = instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
-
- s.call("PUT", "/"+TestHash, "", TestBlock)
- s.call("PUT", "/"+TestHash2, "", TestBlock2)
- resp := s.call("GET", "/metrics.json", "", nil)
- c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
- resp = s.call("GET", "/metrics.json", "foobar", nil)
- c.Check(resp.Code, check.Equals, http.StatusForbidden)
- resp = s.call("GET", "/metrics.json", arvadostest.ManagementToken, nil)
- c.Check(resp.Code, check.Equals, http.StatusOK)
- var j []struct {
- Name string
- Help string
- Type string
- Metric []struct {
- Label []struct {
- Name string
- Value string
- }
- Summary struct {
- SampleCount string
- SampleSum float64
- }
- }
- }
- json.NewDecoder(resp.Body).Decode(&j)
- found := make(map[string]bool)
- names := map[string]bool{}
- for _, g := range j {
- names[g.Name] = true
- for _, m := range g.Metric {
- if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
- c.Check(m.Summary.SampleCount, check.Equals, "2")
- found[g.Name] = true
- }
+ resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil, nil)
+ if tok == "" {
+ c.Check(resp.Code, Equals, http.StatusUnauthorized)
+ c.Check(resp.Body.String(), Equals, "Unauthorized\n")
+ } else {
+ c.Check(resp.Code, Equals, http.StatusForbidden)
+ c.Check(resp.Body.String(), Equals, "Forbidden\n")
}
}
- metricsNames := []string{
- "arvados_keepstore_bufferpool_inuse_buffers",
- "arvados_keepstore_bufferpool_max_buffers",
- "arvados_keepstore_bufferpool_allocated_bytes",
- "arvados_keepstore_pull_queue_inprogress_entries",
- "arvados_keepstore_pull_queue_pending_entries",
- "arvados_keepstore_trash_queue_inprogress_entries",
- "arvados_keepstore_trash_queue_pending_entries",
- "request_duration_seconds",
- }
- for _, m := range metricsNames {
- _, ok := names[m]
- c.Check(ok, check.Equals, true, check.Commentf("checking metric %q", m))
- }
-}
-
-func (s *HandlerSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
- resp := httptest.NewRecorder()
- req, _ := http.NewRequest(method, path, bytes.NewReader(body))
- if tok != "" {
- req.Header.Set("Authorization", "Bearer "+tok)
- }
- s.handler.ServeHTTP(resp, req)
- return resp
+ c.Logf("=== nonexistent mount UUID")
+ resp = call(router, "GET", "/mounts/X/blocks", s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusNotFound)
+
+ c.Logf("=== complete index of first mount")
+ resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks", s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
+
+ c.Logf("=== partial index of first mount (one block matches prefix)")
+ resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
+
+ c.Logf("=== complete index of second mount (note trailing slash)")
+ resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/", s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(resp.Body.String(), Matches, barHash+`\+[0-9]+ [0-9]+\n\n`)
+
+ c.Logf("=== partial index of second mount (no blocks match prefix)")
+ resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(resp.Body.String(), Equals, "\n")
}
diff --git a/services/keepstore/perms.go b/services/keepstore/perms.go
deleted file mode 100644
index 7205a4594d..0000000000
--- a/services/keepstore/perms.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
-)
-
-// SignLocator takes a blobLocator, an apiToken and an expiry time, and
-// returns a signed locator string.
-func SignLocator(cluster *arvados.Cluster, blobLocator, apiToken string, expiry time.Time) string {
- return keepclient.SignLocator(blobLocator, apiToken, expiry, cluster.Collections.BlobSigningTTL.Duration(), []byte(cluster.Collections.BlobSigningKey))
-}
-
-// VerifySignature returns nil if the signature on the signedLocator
-// can be verified using the given apiToken. Otherwise it returns
-// either ExpiredError (if the timestamp has expired, which is
-// something the client could have figured out independently) or
-// PermissionError.
-func VerifySignature(cluster *arvados.Cluster, signedLocator, apiToken string) error {
- err := keepclient.VerifySignature(signedLocator, apiToken, cluster.Collections.BlobSigningTTL.Duration(), []byte(cluster.Collections.BlobSigningKey))
- if err == keepclient.ErrSignatureExpired {
- return ExpiredError
- } else if err != nil {
- return PermissionError
- }
- return nil
-}
diff --git a/services/keepstore/perms_test.go b/services/keepstore/perms_test.go
deleted file mode 100644
index 1322374706..0000000000
--- a/services/keepstore/perms_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "strconv"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- check "gopkg.in/check.v1"
-)
-
-const (
- knownHash = "acbd18db4cc2f85cedef654fccc4a4d8"
- knownLocator = knownHash + "+3"
- knownToken = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
- knownKey = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
- "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
- "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
- "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
- "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
- "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
- "786u5rw2a9gx743dj3fgq2irk"
- knownSignatureTTL = arvados.Duration(24 * 14 * time.Hour)
- knownSignature = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
- knownTimestamp = "7fffffff"
- knownSigHint = "+A" + knownSignature + "@" + knownTimestamp
- knownSignedLocator = knownLocator + knownSigHint
-)
-
-func (s *HandlerSuite) TestSignLocator(c *check.C) {
- tsInt, err := strconv.ParseInt(knownTimestamp, 16, 0)
- if err != nil {
- c.Fatal(err)
- }
- t0 := time.Unix(tsInt, 0)
-
- s.cluster.Collections.BlobSigningTTL = knownSignatureTTL
- s.cluster.Collections.BlobSigningKey = knownKey
- if x := SignLocator(s.cluster, knownLocator, knownToken, t0); x != knownSignedLocator {
- c.Fatalf("Got %+q, expected %+q", x, knownSignedLocator)
- }
-
- s.cluster.Collections.BlobSigningKey = "arbitrarykey"
- if x := SignLocator(s.cluster, knownLocator, knownToken, t0); x == knownSignedLocator {
- c.Fatalf("Got same signature %+q, even though blobSigningKey changed", x)
- }
-}
-
-func (s *HandlerSuite) TestVerifyLocator(c *check.C) {
- s.cluster.Collections.BlobSigningTTL = knownSignatureTTL
- s.cluster.Collections.BlobSigningKey = knownKey
- if err := VerifySignature(s.cluster, knownSignedLocator, knownToken); err != nil {
- c.Fatal(err)
- }
-
- s.cluster.Collections.BlobSigningKey = "arbitrarykey"
- if err := VerifySignature(s.cluster, knownSignedLocator, knownToken); err == nil {
- c.Fatal("Verified signature even with wrong blobSigningKey")
- }
-}
diff --git a/services/keepstore/pipe_adapters.go b/services/keepstore/pipe_adapters.go
deleted file mode 100644
index 6b555054b6..0000000000
--- a/services/keepstore/pipe_adapters.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "io"
- "io/ioutil"
-)
-
-// getWithPipe invokes getter and copies the resulting data into
-// buf. If ctx is done before all data is copied, getWithPipe closes
-// the pipe with an error, and returns early with an error.
-func getWithPipe(ctx context.Context, loc string, buf []byte, br BlockReader) (int, error) {
- piper, pipew := io.Pipe()
- go func() {
- pipew.CloseWithError(br.ReadBlock(ctx, loc, pipew))
- }()
- done := make(chan struct{})
- var size int
- var err error
- go func() {
- size, err = io.ReadFull(piper, buf)
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = nil
- }
- close(done)
- }()
- select {
- case <-ctx.Done():
- piper.CloseWithError(ctx.Err())
- return 0, ctx.Err()
- case <-done:
- piper.Close()
- return size, err
- }
-}
-
-// putWithPipe invokes putter with a new pipe, and copies data
-// from buf into the pipe. If ctx is done before all data is copied,
-// putWithPipe closes the pipe with an error, and returns early with
-// an error.
-func putWithPipe(ctx context.Context, loc string, buf []byte, bw BlockWriter) error {
- piper, pipew := io.Pipe()
- copyErr := make(chan error)
- go func() {
- _, err := io.Copy(pipew, bytes.NewReader(buf))
- copyErr <- err
- close(copyErr)
- }()
-
- putErr := make(chan error, 1)
- go func() {
- putErr <- bw.WriteBlock(ctx, loc, piper)
- close(putErr)
- }()
-
- var err error
- select {
- case err = <-copyErr:
- case err = <-putErr:
- case <-ctx.Done():
- err = ctx.Err()
- }
-
- // Ensure io.Copy goroutine isn't blocked writing to pipew
- // (otherwise, io.Copy is still using buf so it isn't safe to
- // return). This can cause pipew to receive corrupt data if
- // err came from copyErr or ctx.Done() before the copy
- // finished. That's OK, though: in that case err != nil, and
- // CloseWithErr(err) ensures putter() will get an error from
- // piper.Read() before seeing EOF.
- go pipew.CloseWithError(err)
- go io.Copy(ioutil.Discard, piper)
- <-copyErr
-
- // Note: io.Copy() is finished now, but putter() might still
- // be running. If we encounter an error before putter()
- // returns, we return right away without waiting for putter().
-
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err = <-putErr:
- return err
- }
-}
diff --git a/services/keepstore/proxy_remote.go b/services/keepstore/proxy_remote.go
deleted file mode 100644
index 526bc25299..0000000000
--- a/services/keepstore/proxy_remote.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "context"
- "errors"
- "io"
- "net/http"
- "regexp"
- "strings"
- "sync"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadosclient"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
-)
-
-type remoteProxy struct {
- clients map[string]*keepclient.KeepClient
- mtx sync.Mutex
-}
-
-func (rp *remoteProxy) Get(ctx context.Context, w http.ResponseWriter, r *http.Request, cluster *arvados.Cluster, volmgr *RRVolumeManager) {
- // Intervening proxies must not return a cached GET response
- // to a prior request if a X-Keep-Signature request header has
- // been added or changed.
- w.Header().Add("Vary", "X-Keep-Signature")
-
- token := GetAPIToken(r)
- if token == "" {
- http.Error(w, "no token provided in Authorization header", http.StatusUnauthorized)
- return
- }
- if strings.SplitN(r.Header.Get("X-Keep-Signature"), ",", 2)[0] == "local" {
- buf, err := getBufferWithContext(ctx, bufs, BlockSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusServiceUnavailable)
- return
- }
- defer bufs.Put(buf)
- rrc := &remoteResponseCacher{
- Locator: r.URL.Path[1:],
- Token: token,
- Buffer: buf[:0],
- ResponseWriter: w,
- Context: ctx,
- Cluster: cluster,
- VolumeManager: volmgr,
- }
- defer rrc.Close()
- w = rrc
- }
- var remoteClient *keepclient.KeepClient
- var parts []string
- for i, part := range strings.Split(r.URL.Path[1:], "+") {
- switch {
- case i == 0:
- // don't try to parse hash part as hint
- case strings.HasPrefix(part, "A"):
- // drop local permission hint
- continue
- case len(part) > 7 && part[0] == 'R' && part[6] == '-':
- remoteID := part[1:6]
- remote, ok := cluster.RemoteClusters[remoteID]
- if !ok {
- http.Error(w, "remote cluster not configured", http.StatusBadRequest)
- return
- }
- kc, err := rp.remoteClient(remoteID, remote, token)
- if err == auth.ErrObsoleteToken {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- } else if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- remoteClient = kc
- part = "A" + part[7:]
- }
- parts = append(parts, part)
- }
- if remoteClient == nil {
- http.Error(w, "bad request", http.StatusBadRequest)
- return
- }
- locator := strings.Join(parts, "+")
- rdr, _, _, err := remoteClient.Get(locator)
- switch err.(type) {
- case nil:
- defer rdr.Close()
- io.Copy(w, rdr)
- case *keepclient.ErrNotFound:
- http.Error(w, err.Error(), http.StatusNotFound)
- default:
- http.Error(w, err.Error(), http.StatusBadGateway)
- }
-}
-
-func (rp *remoteProxy) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
- rp.mtx.Lock()
- kc, ok := rp.clients[remoteID]
- rp.mtx.Unlock()
- if !ok {
- c := &arvados.Client{
- APIHost: remoteCluster.Host,
- AuthToken: "xxx",
- Insecure: remoteCluster.Insecure,
- }
- ac, err := arvadosclient.New(c)
- if err != nil {
- return nil, err
- }
- kc, err = keepclient.MakeKeepClient(ac)
- if err != nil {
- return nil, err
- }
-
- rp.mtx.Lock()
- if rp.clients == nil {
- rp.clients = map[string]*keepclient.KeepClient{remoteID: kc}
- } else {
- rp.clients[remoteID] = kc
- }
- rp.mtx.Unlock()
- }
- accopy := *kc.Arvados
- accopy.ApiToken = token
- kccopy := *kc
- kccopy.Arvados = &accopy
- token, err := auth.SaltToken(token, remoteID)
- if err != nil {
- return nil, err
- }
- kccopy.Arvados.ApiToken = token
- return &kccopy, nil
-}
-
-var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^\+]*`)
-
-// remoteResponseCacher wraps http.ResponseWriter. It buffers the
-// response data in the provided buffer, writes/touches a copy on a
-// local volume, adds a response header with a locally-signed locator,
-// and finally writes the data through.
-type remoteResponseCacher struct {
- Locator string
- Token string
- Buffer []byte
- Context context.Context
- Cluster *arvados.Cluster
- VolumeManager *RRVolumeManager
- http.ResponseWriter
- statusCode int
-}
-
-func (rrc *remoteResponseCacher) Write(p []byte) (int, error) {
- if len(rrc.Buffer)+len(p) > cap(rrc.Buffer) {
- return 0, errors.New("buffer full")
- }
- rrc.Buffer = append(rrc.Buffer, p...)
- return len(p), nil
-}
-
-func (rrc *remoteResponseCacher) WriteHeader(statusCode int) {
- rrc.statusCode = statusCode
-}
-
-func (rrc *remoteResponseCacher) Close() error {
- if rrc.statusCode == 0 {
- rrc.statusCode = http.StatusOK
- } else if rrc.statusCode != http.StatusOK {
- rrc.ResponseWriter.WriteHeader(rrc.statusCode)
- rrc.ResponseWriter.Write(rrc.Buffer)
- return nil
- }
- _, err := PutBlock(rrc.Context, rrc.VolumeManager, rrc.Buffer, rrc.Locator[:32], nil)
- if rrc.Context.Err() != nil {
- // If caller hung up, log that instead of subsequent/misleading errors.
- http.Error(rrc.ResponseWriter, rrc.Context.Err().Error(), http.StatusGatewayTimeout)
- return err
- }
- if err == RequestHashError {
- http.Error(rrc.ResponseWriter, "checksum mismatch in remote response", http.StatusBadGateway)
- return err
- }
- if err, ok := err.(*KeepError); ok {
- http.Error(rrc.ResponseWriter, err.Error(), err.HTTPCode)
- return err
- }
- if err != nil {
- http.Error(rrc.ResponseWriter, err.Error(), http.StatusBadGateway)
- return err
- }
-
- unsigned := localOrRemoteSignature.ReplaceAllLiteralString(rrc.Locator, "")
- expiry := time.Now().Add(rrc.Cluster.Collections.BlobSigningTTL.Duration())
- signed := SignLocator(rrc.Cluster, unsigned, rrc.Token, expiry)
- if signed == unsigned {
- err = errors.New("could not sign locator")
- http.Error(rrc.ResponseWriter, err.Error(), http.StatusInternalServerError)
- return err
- }
- rrc.Header().Set("X-Keep-Locator", signed)
- rrc.ResponseWriter.WriteHeader(rrc.statusCode)
- _, err = rrc.ResponseWriter.Write(rrc.Buffer)
- return err
-}
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
index 534371cc0e..886754e14a 100644
--- a/services/keepstore/proxy_remote_test.go
+++ b/services/keepstore/proxy_remote_test.go
@@ -5,7 +5,6 @@
package keepstore
import (
- "context"
"crypto/md5"
"encoding/json"
"fmt"
@@ -20,16 +19,18 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/prometheus/client_golang/prometheus"
check "gopkg.in/check.v1"
)
-var _ = check.Suite(&ProxyRemoteSuite{})
+var _ = check.Suite(&proxyRemoteSuite{})
-type ProxyRemoteSuite struct {
+type proxyRemoteSuite struct {
cluster *arvados.Cluster
- handler *handler
+ handler *router
remoteClusterID string
remoteBlobSigningKey []byte
@@ -40,7 +41,7 @@ type ProxyRemoteSuite struct {
remoteAPI *httptest.Server
}
-func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
expectToken, err := auth.SaltToken(arvadostest.ActiveTokenV2, s.remoteClusterID)
if err != nil {
panic(err)
@@ -57,7 +58,7 @@ func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http
http.Error(w, "404", 404)
}
-func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
host, port, _ := net.SplitHostPort(strings.Split(s.remoteKeepproxy.URL, "//")[1])
portnum, _ := strconv.Atoi(port)
if r.URL.Path == "/arvados/v1/discovery/v1/rest" {
@@ -81,15 +82,13 @@ func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Reque
http.Error(w, "404", 404)
}
-func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
+func (s *proxyRemoteSuite) SetUpTest(c *check.C) {
s.remoteClusterID = "z0000"
s.remoteBlobSigningKey = []byte("3b6df6fb6518afe12922a5bc8e67bf180a358bc8")
- s.remoteKeepproxy = httptest.NewServer(http.HandlerFunc(s.remoteKeepproxyHandler))
+ s.remoteKeepproxy = httptest.NewServer(httpserver.LogRequests(http.HandlerFunc(s.remoteKeepproxyHandler)))
s.remoteAPI = httptest.NewUnstartedServer(http.HandlerFunc(s.remoteAPIHandler))
s.remoteAPI.StartTLS()
s.cluster = testCluster(c)
- s.cluster.Collections.BlobSigningKey = knownKey
- s.cluster.SystemRootToken = arvadostest.SystemRootToken
s.cluster.RemoteClusters = map[string]arvados.RemoteCluster{
s.remoteClusterID: {
Host: strings.Split(s.remoteAPI.URL, "//")[1],
@@ -98,17 +97,21 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
Insecure: true,
},
}
- s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "mock"}}
- s.handler = &handler{}
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+ s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "stub"}}
}
-func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
+func (s *proxyRemoteSuite) TearDownTest(c *check.C) {
s.remoteAPI.Close()
s.remoteKeepproxy.Close()
}
-func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
+func (s *proxyRemoteSuite) TestProxyRemote(c *check.C) {
+ reg := prometheus.NewRegistry()
+ router, cancel := testRouter(c, s.cluster, reg)
+ defer cancel()
+ instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+ handler := httpserver.LogRequests(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))
+
data := []byte("foo bar")
s.remoteKeepData = data
locator := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
@@ -172,7 +175,7 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
expectSignature: true,
},
} {
- c.Logf("trial: %s", trial.label)
+ c.Logf("=== trial: %s", trial.label)
s.remoteKeepRequests = 0
@@ -184,11 +187,18 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
req.Header.Set("X-Keep-Signature", trial.xKeepSignature)
}
resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, req)
+ handler.ServeHTTP(resp, req)
c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
- c.Check(resp.Code, check.Equals, trial.expectCode)
+ if !c.Check(resp.Code, check.Equals, trial.expectCode) {
+ c.Logf("resp.Code %d came with resp.Body %q", resp.Code, resp.Body.String())
+ }
if resp.Code == http.StatusOK {
- c.Check(resp.Body.String(), check.Equals, string(data))
+ if trial.method == "HEAD" {
+ c.Check(resp.Body.String(), check.Equals, "")
+ c.Check(resp.Result().ContentLength, check.Equals, int64(len(data)))
+ } else {
+ c.Check(resp.Body.String(), check.Equals, string(data))
+ }
} else {
c.Check(resp.Body.String(), check.Not(check.Equals), string(data))
}
@@ -203,13 +213,13 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
c.Check(locHdr, check.Not(check.Equals), "")
c.Check(locHdr, check.Not(check.Matches), `.*\+R.*`)
- c.Check(VerifySignature(s.cluster, locHdr, trial.token), check.IsNil)
+ c.Check(arvados.VerifySignature(locHdr, trial.token, s.cluster.Collections.BlobSigningTTL.Duration(), []byte(s.cluster.Collections.BlobSigningKey)), check.IsNil)
// Ensure block can be requested using new signature
req = httptest.NewRequest("GET", "/"+locHdr, nil)
req.Header.Set("Authorization", "Bearer "+trial.token)
resp = httptest.NewRecorder()
- s.handler.ServeHTTP(resp, req)
+ handler.ServeHTTP(resp, req)
c.Check(resp.Code, check.Equals, http.StatusOK)
c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
}
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index abe3dc3857..dc5eabaa15 100644
--- a/services/keepstore/pull_worker.go
+++ b/services/keepstore/pull_worker.go
@@ -5,90 +5,164 @@
package keepstore
import (
+ "bytes"
"context"
- "fmt"
- "io"
- "io/ioutil"
- "time"
+ "sync"
+ "sync/atomic"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/keepclient"
+ "github.com/prometheus/client_golang/prometheus"
)
-// RunPullWorker receives PullRequests from pullq, invokes
-// PullItemAndProcess on each one. After each PR, it logs a message
-// indicating whether the pull was successful.
-func (h *handler) runPullWorker(pullq *WorkQueue) {
- for item := range pullq.NextItem {
- pr := item.(PullRequest)
- err := h.pullItemAndProcess(pr)
- pullq.DoneItem <- struct{}{}
- if err == nil {
- h.Logger.Printf("Pull %s success", pr)
- } else {
- h.Logger.Printf("Pull %s error: %s", pr, err)
- }
- }
+type PullListItem struct {
+ Locator string `json:"locator"`
+ Servers []string `json:"servers"`
+ MountUUID string `json:"mount_uuid"` // Destination mount, or "" for "anywhere"
}
-// PullItemAndProcess executes a pull request by retrieving the
-// specified block from one of the specified servers, and storing it
-// on a local volume.
-//
-// If the PR specifies a non-blank mount UUID, PullItemAndProcess will
-// only attempt to write the data to the corresponding
-// volume. Otherwise it writes to any local volume, as a PUT request
-// would.
-func (h *handler) pullItemAndProcess(pullRequest PullRequest) error {
- var vol *VolumeMount
- if uuid := pullRequest.MountUUID; uuid != "" {
- vol = h.volmgr.Lookup(pullRequest.MountUUID, true)
- if vol == nil {
- return fmt.Errorf("pull req has nonexistent mount: %v", pullRequest)
- }
- }
+type puller struct {
+ keepstore *keepstore
+ todo []PullListItem
+ cond *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+ inprogress atomic.Int64
+}
- // Make a private copy of keepClient so we can set
- // ServiceRoots to the source servers specified in the pull
- // request.
- keepClient := *h.keepClient
- serviceRoots := make(map[string]string)
- for _, addr := range pullRequest.Servers {
- serviceRoots[addr] = addr
+func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *puller {
+ p := &puller{
+ keepstore: keepstore,
+ cond: sync.NewCond(&sync.Mutex{}),
}
- keepClient.SetServiceRoots(serviceRoots, nil, nil)
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "pull_queue_pending_entries",
+ Help: "Number of queued pull requests",
+ },
+ func() float64 {
+ p.cond.L.Lock()
+ defer p.cond.L.Unlock()
+ return float64(len(p.todo))
+ },
+ ))
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "pull_queue_inprogress_entries",
+ Help: "Number of pull requests in progress",
+ },
+ func() float64 {
+ return float64(p.inprogress.Load())
+ },
+ ))
+ if len(p.keepstore.mountsW) == 0 {
+ keepstore.logger.Infof("not running pull worker because there are no writable volumes")
+ return p
+ }
+ for i := 0; i < 1 || i < keepstore.cluster.Collections.BlobReplicateConcurrency; i++ {
+ go p.runWorker(ctx)
+ }
+ return p
+}
- signedLocator := SignLocator(h.Cluster, pullRequest.Locator, keepClient.Arvados.ApiToken, time.Now().Add(time.Minute))
+func (p *puller) SetPullList(newlist []PullListItem) {
+ p.cond.L.Lock()
+ p.todo = newlist
+ p.cond.L.Unlock()
+ p.cond.Broadcast()
+}
- reader, contentLen, _, err := GetContent(signedLocator, &keepClient)
- if err != nil {
- return err
+func (p *puller) runWorker(ctx context.Context) {
+ if len(p.keepstore.mountsW) == 0 {
+ p.keepstore.logger.Infof("not running pull worker because there are no writable volumes")
+ return
}
- if reader == nil {
- return fmt.Errorf("No reader found for : %s", signedLocator)
+ c, err := arvados.NewClientFromConfig(p.keepstore.cluster)
+ if err != nil {
+ p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
+ return
}
- defer reader.Close()
-
- readContent, err := ioutil.ReadAll(reader)
+ c.AuthToken = "keepstore-token-used-for-pulling-data-from-same-cluster"
+ ac, err := arvadosclient.New(c)
if err != nil {
- return err
+ p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
+ return
}
-
- if (readContent == nil) || (int64(len(readContent)) != contentLen) {
- return fmt.Errorf("Content not found for: %s", signedLocator)
+ keepClient := &keepclient.KeepClient{
+ Arvados: ac,
+ Want_replicas: 1,
+ DiskCacheSize: keepclient.DiskCacheDisabled,
}
+ // Ensure the loop below wakes up and returns when ctx
+ // cancels, even if pull list is empty.
+ go func() {
+ <-ctx.Done()
+ p.cond.Broadcast()
+ }()
+ for {
+ p.cond.L.Lock()
+ for len(p.todo) == 0 && ctx.Err() == nil {
+ p.cond.Wait()
+ }
+ if ctx.Err() != nil {
+ return
+ }
+ item := p.todo[0]
+ p.todo = p.todo[1:]
+ p.inprogress.Add(1)
+ p.cond.L.Unlock()
- return writePulledBlock(h.volmgr, vol, readContent, pullRequest.Locator)
-}
+ func() {
+ defer p.inprogress.Add(-1)
-// GetContent fetches the content for the given locator using keepclient.
-var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
- return keepClient.Get(signedLocator)
-}
+ logger := p.keepstore.logger.WithField("locator", item.Locator)
+
+ li, err := getLocatorInfo(item.Locator)
+ if err != nil {
+ logger.Warn("ignoring pull request for invalid locator")
+ return
+ }
+
+ var dst *mount
+ if item.MountUUID != "" {
+ dst = p.keepstore.mounts[item.MountUUID]
+ if dst == nil {
+ logger.Warnf("ignoring pull list entry for nonexistent mount %s", item.MountUUID)
+ return
+ } else if !dst.AllowWrite {
+ logger.Warnf("ignoring pull list entry for readonly mount %s", item.MountUUID)
+ return
+ }
+ } else {
+ dst = p.keepstore.rendezvous(item.Locator, p.keepstore.mountsW)[0]
+ }
+
+ serviceRoots := make(map[string]string)
+ for _, addr := range item.Servers {
+ serviceRoots[addr] = addr
+ }
+ keepClient.SetServiceRoots(serviceRoots, nil, nil)
+
+ signedLocator := p.keepstore.signLocator(c.AuthToken, item.Locator)
-var writePulledBlock = func(volmgr *RRVolumeManager, volume Volume, data []byte, locator string) error {
- if volume != nil {
- return volume.Put(context.Background(), locator, data)
+ buf := bytes.NewBuffer(nil)
+ _, err = keepClient.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: signedLocator,
+ WriteTo: buf,
+ })
+ if err != nil {
+ logger.WithError(err).Warnf("error pulling data from remote servers (%s)", item.Servers)
+ return
+ }
+ err = dst.BlockWrite(ctx, li.hash, buf.Bytes())
+ if err != nil {
+ logger.WithError(err).Warnf("error writing data to %s", dst.UUID)
+ return
+ }
+ logger.Info("block pulled")
+ }()
}
- _, err := PutBlock(context.Background(), volmgr, data, locator, nil)
- return err
}
diff --git a/services/keepstore/pull_worker_integration_test.go b/services/keepstore/pull_worker_integration_test.go
deleted file mode 100644
index 3855b4ecd3..0000000000
--- a/services/keepstore/pull_worker_integration_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "errors"
- "io"
- "io/ioutil"
- "strings"
-
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
- "github.com/prometheus/client_golang/prometheus"
- check "gopkg.in/check.v1"
-)
-
-type PullWorkIntegrationTestData struct {
- Name string
- Locator string
- Content string
- GetError string
-}
-
-func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullWorkIntegrationTestData, wantData bool) PullRequest {
- arvadostest.StartKeep(2, false)
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- // Put content if the test needs it
- if wantData {
- locator, _, err := s.handler.keepClient.PutB([]byte(testData.Content))
- if err != nil {
- c.Errorf("Error putting test data in setup for %s %s %v", testData.Content, locator, err)
- }
- if locator == "" {
- c.Errorf("No locator found after putting test data")
- }
- }
-
- // Create pullRequest for the test
- pullRequest := PullRequest{
- Locator: testData.Locator,
- }
- return pullRequest
-}
-
-// Do a get on a block that is not existing in any of the keep servers.
-// Expect "block not found" error.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetNonExistingLocator(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- testData := PullWorkIntegrationTestData{
- Name: "TestPullWorkerIntegration_GetLocator",
- Locator: "5d41402abc4b2a76b9719d911017c592",
- Content: "hello",
- GetError: "Block not found",
- }
-
- pullRequest := s.setupPullWorkerIntegrationTest(c, testData, false)
- defer arvadostest.StopKeep(2)
-
- s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Do a get on a block that exists on one of the keep servers.
-// The setup method will create this block before doing the get.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetExistingLocator(c *check.C) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- testData := PullWorkIntegrationTestData{
- Name: "TestPullWorkerIntegration_GetLocator",
- Locator: "5d41402abc4b2a76b9719d911017c592",
- Content: "hello",
- GetError: "",
- }
-
- pullRequest := s.setupPullWorkerIntegrationTest(c, testData, true)
- defer arvadostest.StopKeep(2)
-
- s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Perform the test.
-// The test directly invokes the "PullItemAndProcess" rather than
-// putting an item on the pullq so that the errors can be verified.
-func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, pullRequest PullRequest, c *check.C) {
-
- // Override writePulledBlock to mock PutBlock functionality
- defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
- writePulledBlock = func(_ *RRVolumeManager, _ Volume, content []byte, _ string) error {
- c.Check(string(content), check.Equals, testData.Content)
- return nil
- }
-
- // Override GetContent to mock keepclient Get functionality
- defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
- GetContent = orig
- }(GetContent)
- GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
- if testData.GetError != "" {
- return nil, 0, "", errors.New(testData.GetError)
- }
- rdr := ioutil.NopCloser(bytes.NewBufferString(testData.Content))
- return rdr, int64(len(testData.Content)), "", nil
- }
-
- err := s.handler.pullItemAndProcess(pullRequest)
-
- if len(testData.GetError) > 0 {
- if (err == nil) || (!strings.Contains(err.Error(), testData.GetError)) {
- c.Errorf("Got error %v, expected %v", err, testData.GetError)
- }
- } else {
- if err != nil {
- c.Errorf("Got error %v, expected nil", err)
- }
- }
-}
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index 2626e66d88..d109b56df3 100644
--- a/services/keepstore/pull_worker_test.go
+++ b/services/keepstore/pull_worker_test.go
@@ -7,309 +7,130 @@ package keepstore
import (
"bytes"
"context"
+ "crypto/md5"
+ "encoding/json"
"errors"
+ "fmt"
"io"
- "io/ioutil"
"net/http"
+ "net/http/httptest"
+ "sort"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/keepclient"
- "github.com/prometheus/client_golang/prometheus"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
- check "gopkg.in/check.v1"
)
-var _ = Suite(&PullWorkerTestSuite{})
-
-type PullWorkerTestSuite struct {
- cluster *arvados.Cluster
- handler *handler
-
- testPullLists map[string]string
- readContent string
- readError error
- putContent []byte
- putError error
-}
-
-func (s *PullWorkerTestSuite) SetUpTest(c *C) {
- s.cluster = testCluster(c)
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-000000000000000": {Driver: "mock"},
- "zzzzz-nyw5e-111111111111111": {Driver: "mock"},
+func (s *routerSuite) TestPullList_Execute(c *C) {
+ remotecluster := testCluster(c)
+ remotecluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-rrrrrrrrrrrrrrr": {Replication: 1, Driver: "stub"},
}
- s.cluster.Collections.BlobReplicateConcurrency = 1
-
- s.handler = &handler{}
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
- s.readContent = ""
- s.readError = nil
- s.putContent = []byte{}
- s.putError = nil
-
- // When a new pull request arrives, the old one will be overwritten.
- // This behavior is verified using these two maps in the
- // "TestPullWorkerPullList_with_two_items_latest_replacing_old"
- s.testPullLists = make(map[string]string)
-}
-
-var firstPullList = []byte(`[
- {
- "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
- "servers":[
- "server_1",
- "server_2"
- ]
- },{
- "locator":"37b51d194a7513e45b56f6524f2d51f2+3",
- "servers":[
- "server_3"
- ]
- }
- ]`)
-
-var secondPullList = []byte(`[
- {
- "locator":"73feffa4b7f6bb68e44cf984c85f6e88+3",
- "servers":[
- "server_1",
- "server_2"
- ]
- }
- ]`)
-
-type PullWorkerTestData struct {
- name string
- req RequestTester
- responseCode int
- responseBody string
- readContent string
- readError bool
- putError bool
-}
-
-// Ensure MountUUID in a pull list is correctly translated to a Volume
-// argument passed to writePulledBlock().
-func (s *PullWorkerTestSuite) TestSpecifyMountUUID(c *C) {
- defer func(f func(*RRVolumeManager, Volume, []byte, string) error) {
- writePulledBlock = f
- }(writePulledBlock)
- pullq := s.handler.Handler.(*router).pullq
-
- for _, spec := range []struct {
- sendUUID string
- expectVolume Volume
- }{
- {
- sendUUID: "",
- expectVolume: nil,
- },
- {
- sendUUID: s.handler.volmgr.Mounts()[0].UUID,
- expectVolume: s.handler.volmgr.Mounts()[0].Volume,
- },
- } {
- writePulledBlock = func(_ *RRVolumeManager, v Volume, _ []byte, _ string) error {
- c.Check(v, Equals, spec.expectVolume)
- return nil
+ remoterouter, cancel := testRouter(c, remotecluster, nil)
+ defer cancel()
+ remoteserver := httptest.NewServer(remoterouter)
+ defer remoteserver.Close()
+
+ router, cancel := testRouter(c, s.cluster, nil)
+ defer cancel()
+
+ executePullList := func(pullList []PullListItem) string {
+ var logbuf bytes.Buffer
+ logger := logrus.New()
+ logger.Out = &logbuf
+ router.keepstore.logger = logger
+
+ listjson, err := json.Marshal(pullList)
+ c.Assert(err, IsNil)
+ resp := call(router, "PUT", "http://example/pull", s.cluster.SystemRootToken, listjson, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ for {
+ router.puller.cond.L.Lock()
+ todolen := len(router.puller.todo)
+ router.puller.cond.L.Unlock()
+ if todolen == 0 && router.puller.inprogress.Load() == 0 {
+ break
+ }
+ time.Sleep(time.Millisecond)
}
-
- resp := IssueRequest(s.handler, &RequestTester{
- uri: "/pull",
- apiToken: s.cluster.SystemRootToken,
- method: "PUT",
- requestBody: []byte(`[{
- "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
- "servers":["server_1","server_2"],
- "mount_uuid":"` + spec.sendUUID + `"}]`),
- })
- c.Assert(resp.Code, Equals, http.StatusOK)
- expectEqualWithin(c, time.Second, 0, func() interface{} {
- st := pullq.Status()
- return st.InProgress + st.Queued
- })
- }
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_two_locators(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorkerPullList_with_two_locators",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 2 pull requests\n",
- readContent: "hello",
- readError: false,
- putError: false,
- }
-
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_one_locator(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorkerPullList_with_one_locator",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 1 pull requests\n",
- readContent: "hola",
- readError: false,
- putError: false,
- }
-
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_one_locator(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorker_error_on_get_one_locator",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 1 pull requests\n",
- readContent: "unused",
- readError: true,
- putError: false,
+ return logbuf.String()
}
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_two_locators(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorker_error_on_get_two_locators",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 2 pull requests\n",
- readContent: "unused",
- readError: true,
- putError: false,
- }
-
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_one_locator(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorker_error_on_put_one_locator",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 1 pull requests\n",
- readContent: "hello hello",
- readError: false,
- putError: true,
- }
-
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_two_locators(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorker_error_on_put_two_locators",
- req: RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
- responseCode: http.StatusOK,
- responseBody: "Received 2 pull requests\n",
- readContent: "hello again",
- readError: false,
- putError: true,
- }
-
- s.performTest(testData, c)
-}
-
-// In this case, the item will not be placed on pullq
-func (s *PullWorkerTestSuite) TestPullWorker_invalidToken(c *C) {
- testData := PullWorkerTestData{
- name: "TestPullWorkerPullList_with_two_locators",
- req: RequestTester{"/pull", "invalidToken", "PUT", firstPullList, ""},
- responseCode: http.StatusUnauthorized,
- responseBody: "Unauthorized\n",
- readContent: "hello",
- readError: false,
- putError: false,
- }
-
- s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) performTest(testData PullWorkerTestData, c *C) {
- pullq := s.handler.Handler.(*router).pullq
-
- s.testPullLists[testData.name] = testData.responseBody
-
- processedPullLists := make(map[string]string)
-
- // Override GetContent to mock keepclient Get functionality
- defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
- GetContent = orig
- }(GetContent)
- GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
- c.Assert(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(1))
- processedPullLists[testData.name] = testData.responseBody
- if testData.readError {
- err = errors.New("Error getting data")
- s.readError = err
- return
- }
- s.readContent = testData.readContent
- reader = ioutil.NopCloser(bytes.NewBufferString(testData.readContent))
- contentLength = int64(len(testData.readContent))
- return
+ newRemoteBlock := func(datastring string) string {
+ data := []byte(datastring)
+ hash := fmt.Sprintf("%x", md5.Sum(data))
+ locator := fmt.Sprintf("%s+%d", hash, len(data))
+ _, err := remoterouter.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+ Hash: hash,
+ Data: data,
+ })
+ c.Assert(err, IsNil)
+ return locator
}
- // Override writePulledBlock to mock PutBlock functionality
- defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
- writePulledBlock = func(_ *RRVolumeManager, v Volume, content []byte, locator string) error {
- if testData.putError {
- s.putError = errors.New("Error putting data")
- return s.putError
- }
- s.putContent = content
- return nil
+ mounts := append([]*mount(nil), router.keepstore.mountsR...)
+ sort.Slice(mounts, func(i, j int) bool { return mounts[i].UUID < mounts[j].UUID })
+ var vols []*stubVolume
+ for _, mount := range mounts {
+ vols = append(vols, mount.volume.(*stubVolume))
}
- c.Check(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(0))
- c.Check(getStatusItem(s.handler, "PullQueue", "Queued"), Equals, float64(0))
- c.Check(getStatusItem(s.handler, "Version"), Not(Equals), "")
-
- response := IssueRequest(s.handler, &testData.req)
- c.Assert(response.Code, Equals, testData.responseCode)
- c.Assert(response.Body.String(), Equals, testData.responseBody)
+ ctx := authContext(arvadostest.ActiveTokenV2)
- expectEqualWithin(c, time.Second, 0, func() interface{} {
- st := pullq.Status()
- return st.InProgress + st.Queued
- })
+ locator := newRemoteBlock("pull available block to unspecified volume")
+ executePullList([]PullListItem{{
+ Locator: locator,
+ Servers: []string{remoteserver.URL}}})
+ _, err := router.keepstore.BlockRead(ctx, arvados.BlockReadOptions{
+ Locator: router.keepstore.signLocator(arvadostest.ActiveTokenV2, locator),
+ WriteTo: io.Discard})
+ c.Check(err, IsNil)
- if testData.name == "TestPullWorkerPullList_with_two_items_latest_replacing_old" {
- c.Assert(len(s.testPullLists), Equals, 2)
- c.Assert(len(processedPullLists), Equals, 1)
- c.Assert(s.testPullLists["Added_before_actual_test_item"], NotNil)
- c.Assert(s.testPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
- c.Assert(processedPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
- } else {
- if testData.responseCode == http.StatusOK {
- c.Assert(len(s.testPullLists), Equals, 1)
- c.Assert(len(processedPullLists), Equals, 1)
- c.Assert(s.testPullLists[testData.name], NotNil)
- } else {
- c.Assert(len(s.testPullLists), Equals, 1)
- c.Assert(len(processedPullLists), Equals, 0)
- }
- }
-
- if testData.readError {
- c.Assert(s.readError, NotNil)
- } else if testData.responseCode == http.StatusOK {
- c.Assert(s.readError, IsNil)
- c.Assert(s.readContent, Equals, testData.readContent)
- if testData.putError {
- c.Assert(s.putError, NotNil)
- } else {
- c.Assert(s.putError, IsNil)
- c.Assert(string(s.putContent), Equals, testData.readContent)
- }
- }
-
- expectChannelEmpty(c, pullq.NextItem)
+ locator0 := newRemoteBlock("pull available block to specified volume 0")
+ locator1 := newRemoteBlock("pull available block to specified volume 1")
+ executePullList([]PullListItem{
+ {
+ Locator: locator0,
+ Servers: []string{remoteserver.URL},
+ MountUUID: vols[0].params.UUID},
+ {
+ Locator: locator1,
+ Servers: []string{remoteserver.URL},
+ MountUUID: vols[1].params.UUID}})
+ c.Check(vols[0].data[locator0[:32]].data, NotNil)
+ c.Check(vols[1].data[locator1[:32]].data, NotNil)
+
+ locator = fooHash + "+3"
+ logs := executePullList([]PullListItem{{
+ Locator: locator,
+ Servers: []string{remoteserver.URL}}})
+ c.Check(logs, Matches, ".*error pulling data from remote servers.*Block not found.*locator=acbd.*\n")
+
+ locator = fooHash + "+3"
+ logs = executePullList([]PullListItem{{
+ Locator: locator,
+ Servers: []string{"http://0.0.0.0:9/"}}})
+ c.Check(logs, Matches, ".*error pulling data from remote servers.*connection refused.*locator=acbd.*\n")
+
+ locator = newRemoteBlock("log error writing to local volume")
+ vols[0].blockWrite = func(context.Context, string, []byte) error { return errors.New("test error") }
+ vols[1].blockWrite = vols[0].blockWrite
+ logs = executePullList([]PullListItem{{
+ Locator: locator,
+ Servers: []string{remoteserver.URL}}})
+ c.Check(logs, Matches, ".*error writing data to zzzzz-nyw5e-.*error=\"test error\".*locator=.*\n")
+ vols[0].blockWrite = nil
+ vols[1].blockWrite = nil
+
+ locator = newRemoteBlock("log error when destination mount does not exist")
+ logs = executePullList([]PullListItem{{
+ Locator: locator,
+ Servers: []string{remoteserver.URL},
+ MountUUID: "bogus-mount-uuid"}})
+ c.Check(logs, Matches, ".*ignoring pull list entry for nonexistent mount bogus-mount-uuid.*locator=.*\n")
+
+ logs = executePullList([]PullListItem{})
+ c.Logf("%s", logs)
}
diff --git a/services/keepstore/putprogress.go b/services/keepstore/putprogress.go
new file mode 100644
index 0000000000..e02b2d09e9
--- /dev/null
+++ b/services/keepstore/putprogress.go
@@ -0,0 +1,101 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+// putProgress tracks replication progress while writing one block:
+// which requested storage classes still need a copy, which mounts
+// have been written to, and how much replication has been achieved.
+type putProgress struct {
+	classNeeded      map[string]bool // storage classes the caller asked for (read-only after construction)
+	classTodo        map[string]bool // requested classes not yet satisfied
+	mountUsed        map[*mount]bool // mounts already written to (true) or rolled back (false)
+	totalReplication int             // sum of Replication over mounts written
+	classDone        map[string]int  // replication achieved per storage class
+}
+
+// Add records a successful write to mnt, crediting the mount's
+// replication to the total and to each of the mount's storage
+// classes, and clearing those classes from the todo set. Calling Add
+// twice for the same mount logs a warning and is otherwise a no-op.
+func (pr *putProgress) Add(mnt *mount) {
+	if pr.mountUsed[mnt] {
+		logrus.Warnf("BUG? superfluous extra write to mount %s", mnt.UUID)
+		return
+	}
+	pr.mountUsed[mnt] = true
+	pr.totalReplication += mnt.Replication
+	for class := range mnt.StorageClasses {
+		pr.classDone[class] += mnt.Replication
+		delete(pr.classTodo, class)
+	}
+}
+
+// Sub reverses a previous Add for mnt (e.g. when a write that was
+// counted optimistically turns out to have failed), debiting the
+// mount's replication and restoring any of its storage classes that
+// the caller originally requested to the todo set. Calling Sub
+// without a prior matching Add logs a warning and is a no-op.
+func (pr *putProgress) Sub(mnt *mount) {
+	if !pr.mountUsed[mnt] {
+		logrus.Warnf("BUG? Sub called with no prior matching Add: %s", mnt.UUID)
+		return
+	}
+	pr.mountUsed[mnt] = false
+	pr.totalReplication -= mnt.Replication
+	for class := range mnt.StorageClasses {
+		pr.classDone[class] -= mnt.Replication
+		if pr.classNeeded[class] {
+			pr.classTodo[class] = true
+		}
+	}
+}
+
+// Done reports whether the write has succeeded: every requested
+// storage class is satisfied and at least one replica was written.
+func (pr *putProgress) Done() bool {
+	return len(pr.classTodo) == 0 && pr.totalReplication > 0
+}
+
+// Want reports whether writing to mnt would make progress: the write
+// is not already done, the mount has not been used yet, and — when
+// specific classes were requested — the mount offers at least one
+// still-needed storage class.
+func (pr *putProgress) Want(mnt *mount) bool {
+	if pr.Done() || pr.mountUsed[mnt] {
+		return false
+	}
+	if len(pr.classTodo) == 0 {
+		// none specified == "any"
+		return true
+	}
+	for class := range mnt.StorageClasses {
+		if pr.classTodo[class] {
+			return true
+		}
+	}
+	return false
+}
+
+// Copy returns a copy of pr whose mutable maps (classTodo,
+// classDone, mountUsed) are deep-copied so the copy can be updated
+// independently. classNeeded is shared, since it is not modified
+// after construction.
+func (pr *putProgress) Copy() *putProgress {
+	cp := putProgress{
+		classNeeded:      pr.classNeeded,
+		classTodo:        make(map[string]bool, len(pr.classTodo)),
+		classDone:        make(map[string]int, len(pr.classDone)),
+		mountUsed:        make(map[*mount]bool, len(pr.mountUsed)),
+		totalReplication: pr.totalReplication,
+	}
+	for k, v := range pr.classTodo {
+		cp.classTodo[k] = v
+	}
+	for k, v := range pr.classDone {
+		cp.classDone[k] = v
+	}
+	for k, v := range pr.mountUsed {
+		cp.mountUsed[k] = v
+	}
+	return &cp
+}
+
+// newPutProgress returns a putProgress for a write that must reach
+// the given storage classes. Empty class names are ignored; an empty
+// list means any mount is acceptable (see Want).
+func newPutProgress(classes []string) putProgress {
+	pr := putProgress{
+		classNeeded: make(map[string]bool, len(classes)),
+		classTodo:   make(map[string]bool, len(classes)),
+		classDone:   map[string]int{},
+		mountUsed:   map[*mount]bool{},
+	}
+	for _, c := range classes {
+		if c != "" {
+			pr.classNeeded[c] = true
+			pr.classTodo[c] = true
+		}
+	}
+	return pr
+}
diff --git a/services/keepstore/router.go b/services/keepstore/router.go
new file mode 100644
index 0000000000..0c8182c6ea
--- /dev/null
+++ b/services/keepstore/router.go
@@ -0,0 +1,276 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ "git.arvados.org/arvados.git/lib/service"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/gorilla/mux"
+)
+
+// router translates incoming HTTP requests into keepstore, puller,
+// and trasher operations, and translates their results into HTTP
+// responses. The embedded http.Handler is the configured mux wrapped
+// with token loading (set up in newRouter).
+type router struct {
+	http.Handler
+	keepstore *keepstore
+	puller    *puller
+	trasher   *trasher
+}
+
+// newRouter returns a service.Handler that routes HTTP requests to
+// the given keepstore, puller, and trasher. Endpoints that expose or
+// modify cluster-wide state (index, mounts, pull/trash lists,
+// untrash, touch, delete) require the cluster's SystemRootToken;
+// block read/write endpoints rely on keepstore's own
+// token/signature checks.
+func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.Handler {
+	rtr := &router{
+		keepstore: keepstore,
+		puller:    puller,
+		trasher:   trasher,
+	}
+	// adminonly wraps a handler so only requests bearing the
+	// SystemRootToken are allowed through.
+	adminonly := func(h http.HandlerFunc) http.HandlerFunc {
+		return auth.RequireLiteralToken(keepstore.cluster.SystemRootToken, h).ServeHTTP
+	}
+
+	r := mux.NewRouter()
+	locatorPath := `/{locator:[0-9a-f]{32}.*}`
+	get := r.Methods(http.MethodGet, http.MethodHead).Subrouter()
+	get.HandleFunc(locatorPath, rtr.handleBlockRead)
+	get.HandleFunc(`/index`, adminonly(rtr.handleIndex))
+	get.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, adminonly(rtr.handleIndex))
+	get.HandleFunc(`/mounts`, adminonly(rtr.handleMounts))
+	get.HandleFunc(`/mounts/{uuid}/blocks`, adminonly(rtr.handleIndex))
+	get.HandleFunc(`/mounts/{uuid}/blocks/{prefix:[0-9a-f]{0,32}}`, adminonly(rtr.handleIndex))
+	put := r.Methods(http.MethodPut).Subrouter()
+	put.HandleFunc(locatorPath, rtr.handleBlockWrite)
+	put.HandleFunc(`/pull`, adminonly(rtr.handlePullList))
+	put.HandleFunc(`/trash`, adminonly(rtr.handleTrashList))
+	put.HandleFunc(`/untrash`+locatorPath, adminonly(rtr.handleUntrash))
+	touch := r.Methods("TOUCH").Subrouter()
+	touch.HandleFunc(locatorPath, adminonly(rtr.handleBlockTouch))
+	delete := r.Methods(http.MethodDelete).Subrouter()
+	delete.HandleFunc(locatorPath, adminonly(rtr.handleBlockTrash))
+	// Any unrecognized path or method gets a 400 response.
+	r.NotFoundHandler = http.HandlerFunc(rtr.handleBadRequest)
+	r.MethodNotAllowedHandler = http.HandlerFunc(rtr.handleBadRequest)
+	rtr.Handler = auth.LoadToken(r)
+	return rtr
+}
+
+// CheckHealth implements service.Handler. The router has no health
+// state of its own, so it always reports healthy.
+func (rtr *router) CheckHealth() error {
+	return nil
+}
+
+// Done implements service.Handler. It returns a nil channel (which
+// never closes): the router does not shut itself down.
+func (rtr *router) Done() <-chan struct{} {
+	return nil
+}
+
+// handleBlockRead handles GET and HEAD requests for /{locator}.
+// GET requests must include a size hint in the locator (the
+// empty-block hash is the one exception) so that an md5 mismatch can
+// still be reported; HEAD requests skip that check. If the
+// X-Keep-Signature request header starts with "local", a
+// locally-signed locator is returned in the X-Keep-Locator response
+// header.
+func (rtr *router) handleBlockRead(w http.ResponseWriter, req *http.Request) {
+	// Intervening proxies must not return a cached GET response
+	// to a prior request if a X-Keep-Signature request header has
+	// been added or changed.
+	w.Header().Add("Vary", "X-Keep-Signature")
+	var localLocator func(string)
+	if strings.SplitN(req.Header.Get("X-Keep-Signature"), ",", 2)[0] == "local" {
+		localLocator = func(locator string) {
+			w.Header().Set("X-Keep-Locator", locator)
+		}
+	}
+	out := w
+	if req.Method == http.MethodHead {
+		// HEAD: report status/headers but discard body bytes.
+		out = discardWrite{ResponseWriter: w}
+	} else if li, err := getLocatorInfo(mux.Vars(req)["locator"]); err != nil {
+		rtr.handleError(w, req, err)
+		return
+	} else if li.size == 0 && li.hash != "d41d8cd98f00b204e9800998ecf8427e" {
+		// GET {hash} (with no size hint) is not allowed
+		// because we can't report md5 mismatches.
+		rtr.handleError(w, req, errMethodNotAllowed)
+		return
+	}
+	n, err := rtr.keepstore.BlockRead(req.Context(), arvados.BlockReadOptions{
+		Locator:      mux.Vars(req)["locator"],
+		WriteTo:      out,
+		LocalLocator: localLocator,
+	})
+	if err != nil && (n == 0 || req.Method == http.MethodHead) {
+		// Report the error only if no body bytes have been
+		// sent to the client; otherwise it is too late to
+		// change the response status.
+		rtr.handleError(w, req, err)
+		return
+	}
+}
+
+// handleBlockWrite handles PUT /{locator}: it stores the request
+// body as a block and responds with the signed locator on success.
+// Desired replication and storage classes are taken from the
+// X-Arvados-Replicas-Desired and X-Keep-Storage-Classes
+// (comma-separated) request headers. Achieved replication is
+// reported in the X-Keep-Replicas-Stored and
+// X-Keep-Storage-Classes-Confirmed ("class=N; class=N") response
+// headers.
+func (rtr *router) handleBlockWrite(w http.ResponseWriter, req *http.Request) {
+	dataSize, _ := strconv.Atoi(req.Header.Get("Content-Length"))
+	replicas, _ := strconv.Atoi(req.Header.Get("X-Arvados-Replicas-Desired"))
+	resp, err := rtr.keepstore.BlockWrite(req.Context(), arvados.BlockWriteOptions{
+		Hash:           mux.Vars(req)["locator"],
+		Reader:         req.Body,
+		DataSize:       dataSize,
+		RequestID:      req.Header.Get("X-Request-Id"),
+		StorageClasses: trimSplit(req.Header.Get("X-Keep-Storage-Classes"), ","),
+		Replicas:       replicas,
+	})
+	if err != nil {
+		rtr.handleError(w, req, err)
+		return
+	}
+	w.Header().Set("X-Keep-Replicas-Stored", fmt.Sprintf("%d", resp.Replicas))
+	// Build the "class=N; class=N" confirmation string, omitting
+	// classes with zero confirmed replicas.
+	scc := ""
+	for k, n := range resp.StorageClasses {
+		if n > 0 {
+			if scc != "" {
+				scc += "; "
+			}
+			scc += fmt.Sprintf("%s=%d", k, n)
+		}
+	}
+	w.Header().Set("X-Keep-Storage-Classes-Confirmed", scc)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintln(w, resp.Locator)
+}
+
+// handleBlockTouch handles TOUCH /{locator} (admin only): update the
+// block's stored timestamp. A nil error from BlockTouch leaves the
+// default 200 response (see handleError).
+func (rtr *router) handleBlockTouch(w http.ResponseWriter, req *http.Request) {
+	err := rtr.keepstore.BlockTouch(req.Context(), mux.Vars(req)["locator"])
+	rtr.handleError(w, req, err)
+}
+
+// handleBlockTrash handles DELETE /{locator} (admin only): move the
+// block to the trash.
+func (rtr *router) handleBlockTrash(w http.ResponseWriter, req *http.Request) {
+	err := rtr.keepstore.BlockTrash(req.Context(), mux.Vars(req)["locator"])
+	rtr.handleError(w, req, err)
+}
+
+// handleMounts handles GET /mounts (admin only): write the mount
+// list as JSON.
+func (rtr *router) handleMounts(w http.ResponseWriter, req *http.Request) {
+	json.NewEncoder(w).Encode(rtr.keepstore.Mounts())
+}
+
+// handleIndex streams a block index for all mounts (GET /index...)
+// or a single mount (GET /mounts/{uuid}/blocks...). The hash prefix
+// filter may be supplied either as a "?prefix=" query parameter or
+// as a path component; the query parameter wins if both are present.
+func (rtr *router) handleIndex(w http.ResponseWriter, req *http.Request) {
+	prefix := req.FormValue("prefix")
+	if prefix == "" {
+		prefix = mux.Vars(req)["prefix"]
+	}
+	cw := &countingWriter{writer: w}
+	err := rtr.keepstore.Index(req.Context(), indexOptions{
+		MountUUID: mux.Vars(req)["uuid"],
+		Prefix:    prefix,
+		WriteTo:   cw,
+	})
+	if err != nil && cw.n.Load() == 0 {
+		// Nothing was written, so it's not too late to report
+		// an error via http response header. (Otherwise, all
+		// we can do is omit the trailing newline below to
+		// indicate something went wrong.)
+		rtr.handleError(w, req, err)
+		return
+	}
+	if err == nil {
+		// A trailing blank line signals to the caller that
+		// the response is complete.
+		w.Write([]byte("\n"))
+	}
+}
+
+// handlePullList handles PUT /pull (admin only): replace the
+// puller's work queue with the JSON pull list in the request body.
+// A list whose first entry is a bare 32-character hash (no size
+// hint) is rejected, since it indicates an outdated keep-balance.
+func (rtr *router) handlePullList(w http.ResponseWriter, req *http.Request) {
+	var pl []PullListItem
+	err := json.NewDecoder(req.Body).Decode(&pl)
+	if err != nil {
+		rtr.handleError(w, req, err)
+		return
+	}
+	req.Body.Close()
+	if len(pl) > 0 && len(pl[0].Locator) == 32 {
+		rtr.handleError(w, req, httpserver.ErrorWithStatus(errors.New("rejecting pull list containing a locator without a size hint -- this probably means keep-balance needs to be upgraded"), http.StatusBadRequest))
+		return
+	}
+	rtr.puller.SetPullList(pl)
+}
+
+// handleTrashList handles PUT /trash (admin only): replace the
+// trasher's work queue with the JSON trash list in the request body.
+func (rtr *router) handleTrashList(w http.ResponseWriter, req *http.Request) {
+	var tl []TrashListItem
+	err := json.NewDecoder(req.Body).Decode(&tl)
+	if err != nil {
+		rtr.handleError(w, req, err)
+		return
+	}
+	req.Body.Close()
+	rtr.trasher.SetTrashList(tl)
+}
+
+// handleUntrash handles PUT /untrash/{locator} (admin only): recover
+// a previously trashed block.
+func (rtr *router) handleUntrash(w http.ResponseWriter, req *http.Request) {
+	err := rtr.keepstore.BlockUntrash(req.Context(), mux.Vars(req)["locator"])
+	rtr.handleError(w, req, err)
+}
+
+// handleBadRequest responds 400 to any request whose path/method
+// combination is not recognized by the router.
+func (rtr *router) handleBadRequest(w http.ResponseWriter, req *http.Request) {
+	http.Error(w, "Bad Request", http.StatusBadRequest)
+}
+
+// handleError writes an error response for err. If the request
+// context is already canceled (client disconnected), it responds 499
+// regardless of err. A nil error writes nothing, leaving the default
+// 200 response. Otherwise the status is 404 for "not exist" errors,
+// the error's own HTTPStatus() if it provides one, or 500.
+func (rtr *router) handleError(w http.ResponseWriter, req *http.Request, err error) {
+	if req.Context().Err() != nil {
+		// 499 "client closed request" (nginx convention).
+		w.WriteHeader(499)
+		return
+	}
+	if err == nil {
+		return
+	} else if os.IsNotExist(err) {
+		w.WriteHeader(http.StatusNotFound)
+	} else if statusErr := interface{ HTTPStatus() int }(nil); errors.As(err, &statusErr) {
+		w.WriteHeader(statusErr.HTTPStatus())
+	} else {
+		w.WriteHeader(http.StatusInternalServerError)
+	}
+	fmt.Fprintln(w, err.Error())
+}
+
+// countingWriter wraps an io.Writer, keeping an atomic count of
+// bytes written so callers can tell whether any output has already
+// been sent (see handleIndex).
+type countingWriter struct {
+	writer io.Writer
+	n      atomic.Int64
+}
+
+// Write writes p to the underlying writer and adds the number of
+// bytes actually written to the running count.
+func (cw *countingWriter) Write(p []byte) (int, error) {
+	n, err := cw.writer.Write(p)
+	cw.n.Add(int64(n))
+	return n, err
+}
+
+// Split s by sep, trim whitespace from each part, and drop empty
+// parts. Note trimSplit("", sep) returns nil, not [""].
+func trimSplit(s, sep string) []string {
+	var r []string
+	for _, part := range strings.Split(s, sep) {
+		part = strings.TrimSpace(part)
+		if part != "" {
+			r = append(r, part)
+		}
+	}
+	return r
+}
+
+// setSizeOnWrite sets the Content-Length header to the given size on
+// first write; subsequent writes leave the headers untouched. This
+// lets the expected size reach the client even if fewer (or more)
+// body bytes end up being written.
+type setSizeOnWrite struct {
+	http.ResponseWriter
+	size  int
+	wrote bool
+}
+
+// Write sets Content-Length (once, before the first byte goes out)
+// and forwards p to the underlying ResponseWriter.
+func (ss *setSizeOnWrite) Write(p []byte) (int, error) {
+	if !ss.wrote {
+		ss.Header().Set("Content-Length", fmt.Sprintf("%d", ss.size))
+		ss.wrote = true
+	}
+	return ss.ResponseWriter.Write(p)
+}
+
+// discardWrite wraps an http.ResponseWriter, reporting successful
+// writes without sending any body bytes — used by handleBlockRead to
+// serve HEAD requests through the same code path as GET.
+type discardWrite struct {
+	http.ResponseWriter
+}
+
+// Write discards p and reports it as fully written.
+func (discardWrite) Write(p []byte) (int, error) {
+	return len(p), nil
+}
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
new file mode 100644
index 0000000000..15a055d55e
--- /dev/null
+++ b/services/keepstore/router_test.go
@@ -0,0 +1,517 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "sort"
+ "strings"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/prometheus/client_golang/prometheus"
+ . "gopkg.in/check.v1"
+)
+
+// routerSuite tests that the router correctly translates HTTP
+// requests to the appropriate keepstore functionality, and translates
+// the results to HTTP responses.
+type routerSuite struct {
+	cluster *arvados.Cluster // test cluster config, rebuilt by SetUpTest for each test
+}
+
+// Register the suite with gocheck.
+var _ = Suite(&routerSuite{})
+
+// testRouter returns a router backed by a fresh test keepstore with
+// its own puller and trasher. Calling the returned cancel func also
+// shuts down the underlying keepstore (via the goroutine below).
+func testRouter(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*router, context.CancelFunc) {
+	if reg == nil {
+		reg = prometheus.NewRegistry()
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	ks, kcancel := testKeepstore(t, cluster, reg)
+	go func() {
+		<-ctx.Done()
+		kcancel()
+	}()
+	puller := newPuller(ctx, ks, reg)
+	trasher := newTrasher(ctx, ks, reg)
+	return newRouter(ks, puller, trasher).(*router), cancel
+}
+
+// SetUpTest builds a cluster config with two stub volumes, each in
+// its own storage class, with both classes marked default.
+func (s *routerSuite) SetUpTest(c *C) {
+	s.cluster = testCluster(c)
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass1": true}},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass2": true}},
+	}
+	s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+		"testclass1": arvados.StorageClassConfig{
+			Default: true,
+		},
+		"testclass2": arvados.StorageClassConfig{
+			Default: true,
+		},
+	}
+}
+
+// TestBlockRead_Token checks token/signature validation on GET and
+// HEAD block requests: missing token => 401, wrong token => 400
+// (signature computed with it doesn't match), correct token => 200.
+func (s *routerSuite) TestBlockRead_Token(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	err := router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	c.Assert(locSigned, Not(Equals), fooHash+"+3")
+
+	// No token provided
+	resp := call(router, "GET", "http://example/"+locSigned, "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+	c.Check(resp.Body.String(), Matches, "no token provided in Authorization header\n")
+
+	// Different token => invalid signature
+	resp = call(router, "GET", "http://example/"+locSigned, "badtoken", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusBadRequest)
+	c.Check(resp.Body.String(), Equals, "invalid signature\n")
+
+	// Correct token
+	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "foo")
+
+	// HEAD returns the size but no body
+	resp = call(router, "HEAD", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Result().ContentLength, Equals, int64(3))
+	c.Check(resp.Body.String(), Equals, "")
+}
+
+// As a special case we allow HEAD requests that only provide a hash
+// without a size hint. This accommodates uses of keep-block-check
+// where it's inconvenient to attach size hints to known hashes.
+//
+// GET requests must provide a size hint -- otherwise we can't
+// propagate a checksum mismatch error.
+func (s *routerSuite) TestBlockRead_NoSizeHint(c *C) {
+	s.cluster.Collections.BlobSigning = true
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+	err := router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+
+	// hash+signature, but no size hint: GET is refused even with
+	// a valid signature.
+	hashSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash)
+	resp := call(router, "GET", "http://example/"+hashSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusMethodNotAllowed)
+
+	// With signing enabled, tokenless HEAD is rejected whether or
+	// not a size hint is given.
+	resp = call(router, "HEAD", "http://example/"+fooHash, "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+	resp = call(router, "HEAD", "http://example/"+fooHash+"+3", "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+
+	// With signing disabled, GET still requires a size hint, but
+	// HEAD without one succeeds and reports the size.
+	s.cluster.Collections.BlobSigning = false
+	router, cancel = testRouter(c, s.cluster, nil)
+	defer cancel()
+	err = router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+
+	resp = call(router, "GET", "http://example/"+fooHash, "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusMethodNotAllowed)
+
+	resp = call(router, "HEAD", "http://example/"+fooHash, "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "")
+	c.Check(resp.Result().ContentLength, Equals, int64(3))
+	c.Check(resp.Header().Get("Content-Length"), Equals, "3")
+}
+
+// By the time we discover the checksum mismatch, it's too late to
+// change the response code, but the expected block size is given in
+// the Content-Length response header, so a generic http client can
+// detect the problem.
+func (s *routerSuite) TestBlockRead_ChecksumMismatch(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	gooddata := make([]byte, 10_000_000)
+	gooddata[0] = 'a'
+	hash := fmt.Sprintf("%x", md5.Sum(gooddata))
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fmt.Sprintf("%s+%d", hash, len(gooddata)))
+
+	// Store corrupt data of various lengths (shorter, equal,
+	// longer) under the good hash, and confirm the client can
+	// detect the problem in each case.
+	for _, baddata := range [][]byte{
+		make([]byte, 3),
+		make([]byte, len(gooddata)),
+		make([]byte, len(gooddata)-1),
+		make([]byte, len(gooddata)+1),
+		make([]byte, len(gooddata)*2),
+	} {
+		c.Logf("=== baddata len %d", len(baddata))
+		err := router.keepstore.mountsW[0].BlockWrite(context.Background(), hash, baddata)
+		c.Assert(err, IsNil)
+
+		// GET: status is already 200 by the time the mismatch
+		// is detected, but Content-Length disagrees with the
+		// actual body, which a generic client can detect.
+		resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+		if !c.Check(resp.Code, Equals, http.StatusOK) {
+			c.Logf("resp.Body: %s", resp.Body.String())
+		}
+		c.Check(resp.Body.Len(), Not(Equals), len(gooddata))
+		c.Check(resp.Result().ContentLength, Equals, int64(len(gooddata)))
+
+		// HEAD writes no body, so the mismatch can still be
+		// reported as a status code.
+		resp = call(router, "HEAD", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusBadGateway)
+
+		hashSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, hash)
+		resp = call(router, "HEAD", "http://example/"+hashSigned, arvadostest.ActiveTokenV2, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusBadGateway)
+	}
+}
+
+// TestBlockWrite verifies that a PUT block can be read back via the
+// signed locator returned in the response body.
+func (s *routerSuite) TestBlockWrite(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	locator := strings.TrimSpace(resp.Body.String())
+
+	resp = call(router, "GET", "http://example/"+locator, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "foo")
+}
+
+// TestBlockWrite_Headers verifies the replica-count and
+// storage-class request headers and the corresponding confirmation
+// response headers, including whitespace trimming of the
+// comma-separated class list.
+func (s *routerSuite) TestBlockWrite_Headers(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, "testclass1=1")
+
+	resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{"testclass1"}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
+
+	resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{" , testclass2 , "}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
+}
+
+// sortCommaSeparated sorts a ", "-separated list so header values
+// can be compared independent of map iteration order.
+func sortCommaSeparated(s string) string {
+	slice := strings.Split(s, ", ")
+	sort.Strings(slice)
+	return strings.Join(slice, ", ")
+}
+
+// TestBlockTouch verifies TOUCH: 404 for missing blocks, admin token
+// required, and only the first volume holding the block gets its
+// mtime updated.
+func (s *routerSuite) TestBlockTouch(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	resp := call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusNotFound)
+
+	vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	vol1 := router.keepstore.mountsW[1].volume.(*stubVolume)
+	err = vol1.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+
+	t1 := time.Now()
+	resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	t2 := time.Now()
+
+	// Unauthorized request is a no-op
+	resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusForbidden)
+
+	// Volume 0 mtime should be updated
+	t, err := vol0.Mtime(fooHash)
+	c.Check(err, IsNil)
+	c.Check(t.After(t1), Equals, true)
+	c.Check(t.Before(t2), Equals, true)
+
+	// Volume 1 mtime should not be updated
+	t, err = vol1.Mtime(fooHash)
+	c.Check(err, IsNil)
+	c.Check(t.Before(t1), Equals, true)
+
+	// After trashing on both volumes, TOUCH reports 404 again.
+	err = vol0.BlockTrash(fooHash)
+	c.Assert(err, IsNil)
+	err = vol1.BlockTrash(fooHash)
+	c.Assert(err, IsNil)
+	resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusNotFound)
+}
+
+// TestBlockTrash verifies DELETE trashes a block whose timestamp is
+// older than BlobSigningTTL, after which reads fail with ErrNotExist.
+func (s *routerSuite) TestBlockTrash(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	// Backdate the block so it is eligible for trashing.
+	err = vol0.blockTouchWithTime(fooHash, time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration()))
+	c.Assert(err, IsNil)
+	resp := call(router, "DELETE", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(vol0.stubLog.String(), Matches, `(?ms).* trash .*`)
+	err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+	c.Assert(err, Equals, os.ErrNotExist)
+}
+
+// TestBlockUntrash verifies PUT /untrash/{locator} makes a trashed
+// block readable again.
+func (s *routerSuite) TestBlockUntrash(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	err = vol0.BlockTrash(fooHash)
+	c.Assert(err, IsNil)
+	err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+	c.Assert(err, Equals, os.ErrNotExist)
+	resp := call(router, "PUT", "http://example/untrash/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(vol0.stubLog.String(), Matches, `(?ms).* untrash .*`)
+	err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+	c.Check(err, IsNil)
+}
+
+// TestBadRequest verifies that unrecognized paths and methods all
+// respond 400 (not 404/405).
+func (s *routerSuite) TestBadRequest(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	for _, trial := range []string{
+		"GET /",
+		"GET /xyz",
+		"GET /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabcdefg",
+		"GET /untrash",
+		"GET /mounts/blocks/123",
+		"GET /trash",
+		"GET /pull",
+		"GET /debug.json",  // old endpoint, no longer exists
+		"GET /status.json", // old endpoint, no longer exists
+		"POST /",
+		"POST /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+		"POST /trash",
+		"PROPFIND /",
+		"MAKE-COFFEE /",
+	} {
+		c.Logf("=== %s", trial)
+		methodpath := strings.Split(trial, " ")
+		req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
+		resp := httptest.NewRecorder()
+		router.ServeHTTP(resp, req)
+		c.Check(resp.Code, Equals, http.StatusBadRequest)
+	}
+}
+
+// TestRequireAdminMgtToken verifies that management endpoints
+// respond 401 when no token is given and 403 for a non-admin token.
+func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	for _, token := range []string{"badtoken", ""} {
+		for _, trial := range []string{
+			"PUT /pull",
+			"PUT /trash",
+			"GET /index",
+			"GET /index/",
+			"GET /index/1234",
+			"PUT /untrash/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+		} {
+			c.Logf("=== %s", trial)
+			methodpath := strings.Split(trial, " ")
+			req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
+			if token != "" {
+				req.Header.Set("Authorization", "Bearer "+token)
+			}
+			resp := httptest.NewRecorder()
+			router.ServeHTTP(resp, req)
+			if token == "" {
+				c.Check(resp.Code, Equals, http.StatusUnauthorized)
+			} else {
+				c.Check(resp.Code, Equals, http.StatusForbidden)
+			}
+		}
+	}
+	// TOUCH is also admin-only.
+	req := httptest.NewRequest("TOUCH", "http://example/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
+	resp := httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+}
+
+// TestVolumeErrorStatusCode verifies that a volume error carrying an
+// HTTPStatus() is propagated to the client, errors without one map
+// to 500, and a successful read from a later volume masks an earlier
+// volume's failure.
+func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(_ context.Context, hash string, w io.WriterAt) error {
+		return httpserver.ErrorWithStatus(errors.New("test error"), http.StatusBadGateway)
+	}
+
+	// To test whether we fall back to volume 1 after volume 0
+	// returns an error, we need to use a block whose rendezvous
+	// order has volume 0 first. Luckily "bar" is such a block.
+	c.Assert(router.keepstore.rendezvous(barHash, router.keepstore.mountsR)[0].UUID, DeepEquals, router.keepstore.mountsR[0].UUID)
+
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, barHash+"+3")
+
+	// Volume 0 fails with an error that specifies an HTTP status
+	// code, so that code should be propagated to caller.
+	resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusBadGateway)
+	c.Check(resp.Body.String(), Equals, "test error\n")
+
+	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(_ context.Context, hash string, w io.WriterAt) error {
+		return errors.New("no http status provided")
+	}
+	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusInternalServerError)
+	c.Check(resp.Body.String(), Equals, "no http status provided\n")
+
+	c.Assert(router.keepstore.mountsW[1].volume.BlockWrite(context.Background(), barHash, []byte("bar")), IsNil)
+
+	// If the requested block is available on the second volume,
+	// it doesn't matter that the first volume failed.
+	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "bar")
+}
+
+// TestIndex exercises /index and /mounts/{uuid}/blocks with every
+// supported prefix form (query parameter and path component),
+// checking entries from both volumes and the trailing blank line
+// that marks a complete response.
+func (s *routerSuite) TestIndex(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	// Empty store: just the completeness marker.
+	resp := call(router, "GET", "http://example/index", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "\n")
+
+	resp = call(router, "GET", "http://example/index?prefix=fff", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "\n")
+
+	// foo and bar on volume 0 at t0; bar on volume 1 at t1.
+	t0 := time.Now().Add(-time.Hour)
+	vol0 := router.keepstore.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	err = vol0.blockTouchWithTime(fooHash, t0)
+	c.Assert(err, IsNil)
+	err = vol0.BlockWrite(context.Background(), barHash, []byte("bar"))
+	c.Assert(err, IsNil)
+	err = vol0.blockTouchWithTime(barHash, t0)
+	c.Assert(err, IsNil)
+	t1 := time.Now().Add(-time.Minute)
+	vol1 := router.keepstore.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume)
+	err = vol1.BlockWrite(context.Background(), barHash, []byte("bar"))
+	c.Assert(err, IsNil)
+	err = vol1.blockTouchWithTime(barHash, t1)
+	c.Assert(err, IsNil)
+
+	// Prefix "acb" matches only foo (one entry, on volume 0).
+	for _, path := range []string{
+		"/index?prefix=acb",
+		"/index/acb",
+		"/index/?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks/?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks/acb",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, fooHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n\n")
+	}
+
+	// Prefix "37" matches bar on both volumes (two entries).
+	for _, path := range []string{
+		"/index?prefix=37",
+		"/index/37",
+		"/index/?prefix=37",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, ""+
+			barHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n"+
+			barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+	}
+
+	// Restricting to volume 1 yields only its bar entry.
+	for _, path := range []string{
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks?prefix=37",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/?prefix=37",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/37",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+	}
+
+	// Full index: 3 entries + blank line => 5 newline-split parts.
+	for _, path := range []string{
+		"/index",
+		"/index?prefix=",
+		"/index/",
+		"/index/?prefix=",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(strings.Split(resp.Body.String(), "\n"), HasLen, 5)
+	}
+
+}
+
+// Check that the context passed to a volume method gets cancelled
+// when the http client hangs up.
+func (s *routerSuite) TestCancelOnDisconnect(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	// Stall the volume read until "unblock" closes, then confirm
+	// the volume-level context has been canceled.
+	unblock := make(chan struct{})
+	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(ctx context.Context, hash string, w io.WriterAt) error {
+		<-unblock
+		c.Check(ctx.Err(), NotNil)
+		return ctx.Err()
+	}
+	// Simulate the client hanging up mid-request.
+	go func() {
+		time.Sleep(time.Second / 10)
+		cancel()
+		close(unblock)
+	}()
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	req, err := http.NewRequestWithContext(ctx, "GET", "http://example/"+locSigned, nil)
+	c.Assert(err, IsNil)
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	resp := httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, 499)
+}
+
+// call issues a synthetic request against handler (with optional
+// bearer token, body, and extra headers) and returns the recorded
+// response.
+func call(handler http.Handler, method, path, tok string, body []byte, hdr http.Header) *httptest.ResponseRecorder {
+	resp := httptest.NewRecorder()
+	req, err := http.NewRequest(method, path, bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	for k := range hdr {
+		req.Header.Set(k, hdr.Get(k))
+	}
+	if tok != "" {
+		req.Header.Set("Authorization", "Bearer "+tok)
+	}
+	handler.ServeHTTP(resp, req)
+	return resp
+}
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index ee89b156f7..2e2e97a974 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -5,18 +5,14 @@
package keepstore
import (
- "bufio"
"bytes"
"context"
- "crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
- "io/ioutil"
- "net/http"
"os"
"regexp"
"strings"
@@ -25,824 +21,262 @@ import (
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/AdRoll/goamz/aws"
- "github.com/AdRoll/goamz/s3"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
func init() {
- driver["S3"] = chooseS3VolumeDriver
-}
-
-func newS3Volume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, v)
- if err != nil {
- return nil, err
- }
- v.logger = logger.WithField("Volume", v.String())
- return v, v.check()
-}
-
-func (v *S3Volume) check() error {
- if v.Bucket == "" {
- return errors.New("DriverParameters: Bucket must be provided")
- }
- if v.IndexPageSize == 0 {
- v.IndexPageSize = 1000
- }
- if v.RaceWindow < 0 {
- return errors.New("DriverParameters: RaceWindow must not be negative")
- }
-
- var ok bool
- v.region, ok = aws.Regions[v.Region]
- if v.Endpoint == "" {
- if !ok {
- return fmt.Errorf("unrecognized region %+q; try specifying endpoint instead", v.Region)
- }
- } else if ok {
- return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
- "specify empty endpoint or use a different region name", v.Region, v.Endpoint)
- } else {
- v.region = aws.Region{
- Name: v.Region,
- S3Endpoint: v.Endpoint,
- S3LocationConstraint: v.LocationConstraint,
- }
- }
-
- // Zero timeouts mean "wait forever", which is a bad
- // default. Default to long timeouts instead.
- if v.ConnectTimeout == 0 {
- v.ConnectTimeout = s3DefaultConnectTimeout
- }
- if v.ReadTimeout == 0 {
- v.ReadTimeout = s3DefaultReadTimeout
- }
-
- v.bucket = &s3bucket{
- bucket: &s3.Bucket{
- S3: v.newS3Client(),
- Name: v.Bucket,
- },
- }
- // Set up prometheus metrics
- lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
- v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
-
- err := v.bootstrapIAMCredentials()
- if err != nil {
- return fmt.Errorf("error getting IAM credentials: %s", err)
- }
-
- return nil
+ driver["S3"] = news3Volume
}
const (
- s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
- s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+ s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
+ s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+ maxClockSkew = 600 * time.Second
+ nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+ s3downloaderPartSize = 6 * 1024 * 1024
+ s3downloaderReadConcurrency = 11
+ s3uploaderPartSize = 5 * 1024 * 1024
+ s3uploaderWriteConcurrency = 5
)
var (
- // ErrS3TrashDisabled is returned by Trash if that operation
- // is impossible with the current config.
- ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
-
- s3ACL = s3.Private
-
- zeroTime time.Time
+ errS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+ s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+ s3AWSZeroTime time.Time
)
-const (
- maxClockSkew = 600 * time.Second
- nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
-)
-
-func s3regions() (okList []string) {
- for r := range aws.Regions {
- okList = append(okList, r)
- }
- return
-}
-
-// S3Volume implements Volume using an S3 bucket.
-type S3Volume struct {
+// s3Volume implements Volume using an S3 bucket.
+type s3Volume struct {
arvados.S3VolumeDriverParameters
AuthToken string // populated automatically when IAMRole is used
AuthExpiration time.Time // populated automatically when IAMRole is used
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
- bucket *s3bucket
- region aws.Region
- startOnce sync.Once
-}
-
-// GetDeviceID returns a globally unique ID for the storage bucket.
-func (v *S3Volume) GetDeviceID() string {
- return "s3://" + v.Endpoint + "/" + v.Bucket
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ bufferPool *bufferPool
+ bucket *s3Bucket
+ region string
+ startOnce sync.Once
}
-func (v *S3Volume) bootstrapIAMCredentials() error {
- if v.AccessKeyID != "" || v.SecretAccessKey != "" {
- if v.IAMRole != "" {
- return errors.New("invalid DriverParameters: AccessKeyID and SecretAccessKey must be blank if IAMRole is specified")
- }
- return nil
- }
- ttl, err := v.updateIAMCredentials()
- if err != nil {
- return err
- }
- go func() {
- for {
- time.Sleep(ttl)
- ttl, err = v.updateIAMCredentials()
- if err != nil {
- v.logger.WithError(err).Warnf("failed to update credentials for IAM role %q", v.IAMRole)
- ttl = time.Second
- } else if ttl < time.Second {
- v.logger.WithField("TTL", ttl).Warnf("received stale credentials for IAM role %q", v.IAMRole)
- ttl = time.Second
- }
- }
- }()
- return nil
+// s3Bucket wraps an S3 bucket and counts I/O and API usage stats. The
+// wrapped bucket can be replaced atomically with SetBucket in order
+// to update credentials.
+type s3Bucket struct {
+ bucket string
+ svc *s3.Client
+ stats s3awsbucketStats
+ mu sync.Mutex
}
-func (v *S3Volume) newS3Client() *s3.S3 {
- auth := aws.NewAuth(v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration)
- client := s3.New(*auth, v.region)
- if !v.V2Signature {
- client.Signature = aws.V4Signature
+func (v *s3Volume) isKeepBlock(s string) (string, bool) {
+ if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
+ s = s[v.PrefixLength+1:]
}
- client.ConnectTimeout = time.Duration(v.ConnectTimeout)
- client.ReadTimeout = time.Duration(v.ReadTimeout)
- return client
+ return s, s3AWSKeepBlockRegexp.MatchString(s)
}
-// returned by AWS metadata endpoint .../security-credentials/${rolename}
-type iamCredentials struct {
- Code string
- LastUpdated time.Time
- Type string
- AccessKeyID string
- SecretAccessKey string
- Token string
- Expiration time.Time
-}
-
-// Returns TTL of updated credentials, i.e., time to sleep until next
-// update.
-func (v *S3Volume) updateIAMCredentials() (time.Duration, error) {
- ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
- defer cancel()
-
- metadataBaseURL := "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
-
- var url string
- if strings.Contains(v.IAMRole, "://") {
- // Configuration provides complete URL (used by tests)
- url = v.IAMRole
- } else if v.IAMRole != "" {
- // Configuration provides IAM role name and we use the
- // AWS metadata endpoint
- url = metadataBaseURL + v.IAMRole
+// Return the key used for a given loc. If PrefixLength==0 then
+// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
+// "abc/abcdef0123", etc.
+func (v *s3Volume) key(loc string) string {
+ if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
+ return loc[:v.PrefixLength] + "/" + loc
} else {
- url = metadataBaseURL
- v.logger.WithField("URL", url).Debug("looking up IAM role name")
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return 0, fmt.Errorf("error setting up request %s: %s", url, err)
- }
- resp, err := http.DefaultClient.Do(req.WithContext(ctx))
- if err != nil {
- return 0, fmt.Errorf("error getting %s: %s", url, err)
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotFound {
- return 0, fmt.Errorf("this instance does not have an IAM role assigned -- either assign a role, or configure AccessKeyID and SecretAccessKey explicitly in DriverParameters (error getting %s: HTTP status %s)", url, resp.Status)
- } else if resp.StatusCode != http.StatusOK {
- return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
- }
- body := bufio.NewReader(resp.Body)
- var role string
- _, err = fmt.Fscanf(body, "%s\n", &role)
- if err != nil {
- return 0, fmt.Errorf("error reading response from %s: %s", url, err)
- }
- if n, _ := body.Read(make([]byte, 64)); n > 0 {
- v.logger.Warnf("ignoring additional data returned by metadata endpoint %s after the single role name that we expected", url)
- }
- v.logger.WithField("Role", role).Debug("looked up IAM role name")
- url = url + role
- }
-
- v.logger.WithField("URL", url).Debug("getting credentials")
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return 0, fmt.Errorf("error setting up request %s: %s", url, err)
- }
- resp, err := http.DefaultClient.Do(req.WithContext(ctx))
- if err != nil {
- return 0, fmt.Errorf("error getting %s: %s", url, err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
- }
- var cred iamCredentials
- err = json.NewDecoder(resp.Body).Decode(&cred)
- if err != nil {
- return 0, fmt.Errorf("error decoding credentials from %s: %s", url, err)
- }
- v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration = cred.AccessKeyID, cred.SecretAccessKey, cred.Token, cred.Expiration
- v.bucket.SetBucket(&s3.Bucket{
- S3: v.newS3Client(),
- Name: v.Bucket,
- })
- // TTL is time from now to expiration, minus 5m. "We make new
- // credentials available at least five minutes before the
- // expiration of the old credentials." --
- // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
- // (If that's not true, the returned ttl might be zero or
- // negative, which the caller can handle.)
- ttl := cred.Expiration.Sub(time.Now()) - 5*time.Minute
- v.logger.WithFields(logrus.Fields{
- "AccessKeyID": cred.AccessKeyID,
- "LastUpdated": cred.LastUpdated,
- "Expiration": cred.Expiration,
- "TTL": arvados.Duration(ttl),
- }).Debug("updated credentials")
- return ttl, nil
-}
-
-func (v *S3Volume) getReaderWithContext(ctx context.Context, key string) (rdr io.ReadCloser, err error) {
- ready := make(chan bool)
- go func() {
- rdr, err = v.getReader(key)
- close(ready)
- }()
- select {
- case <-ready:
- return
- case <-ctx.Done():
- v.logger.Debugf("s3: abandoning getReader(%s): %s", key, ctx.Err())
- go func() {
- <-ready
- if err == nil {
- rdr.Close()
- }
- }()
- return nil, ctx.Err()
+ return loc
}
}
-// getReader wraps (Bucket)GetReader.
-//
-// In situations where (Bucket)GetReader would fail because the block
-// disappeared in a Trash race, getReader calls fixRace to recover the
-// data, and tries again.
-func (v *S3Volume) getReader(key string) (rdr io.ReadCloser, err error) {
- rdr, err = v.bucket.GetReader(key)
- err = v.translateError(err)
- if err == nil || !os.IsNotExist(err) {
- return
+func news3Volume(params newVolumeParams) (volume, error) {
+ v := &s3Volume{
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
}
-
- _, err = v.bucket.Head("recent/"+key, nil)
- err = v.translateError(err)
+ err := json.Unmarshal(params.ConfigVolume.DriverParameters, v)
if err != nil {
- // If we can't read recent/X, there's no point in
- // trying fixRace. Give up.
- return
- }
- if !v.fixRace(key) {
- err = os.ErrNotExist
- return
- }
-
- rdr, err = v.bucket.GetReader(key)
- if err != nil {
- v.logger.Warnf("reading %s after successful fixRace: %s", key, err)
- err = v.translateError(err)
+ return nil, err
}
- return
+ v.logger = params.Logger.WithField("Volume", v.DeviceID())
+ return v, v.check("")
}
-// Get a block: copy the block data into buf, and return the number of
-// bytes copied.
-func (v *S3Volume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- key := v.key(loc)
- rdr, err := v.getReaderWithContext(ctx, key)
- if err != nil {
- return 0, err
- }
-
- var n int
- ready := make(chan bool)
- go func() {
- defer close(ready)
-
- defer rdr.Close()
- n, err = io.ReadFull(rdr, buf)
-
- switch err {
- case nil, io.EOF, io.ErrUnexpectedEOF:
- err = nil
- default:
- err = v.translateError(err)
+func (v *s3Volume) translateError(err error) error {
+ if _, ok := err.(*aws.RequestCanceledError); ok {
+ return context.Canceled
+ } else if aerr, ok := err.(awserr.Error); ok {
+ if aerr.Code() == "NotFound" {
+ return os.ErrNotExist
+ } else if aerr.Code() == "NoSuchKey" {
+ return os.ErrNotExist
}
- }()
- select {
- case <-ctx.Done():
- v.logger.Debugf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
- rdr.Close()
- // Must wait for ReadFull to return, to ensure it
- // doesn't write to buf after we return.
- v.logger.Debug("s3: waiting for ReadFull() to fail")
- <-ready
- return 0, ctx.Err()
- case <-ready:
- return n, err
- }
-}
-
-// Compare the given data with the stored data.
-func (v *S3Volume) Compare(ctx context.Context, loc string, expect []byte) error {
- key := v.key(loc)
- errChan := make(chan error, 1)
- go func() {
- _, err := v.bucket.Head("recent/"+key, nil)
- errChan <- err
- }()
- var err error
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err = <-errChan:
- }
- if err != nil {
- // Checking for "loc" itself here would interfere with
- // future GET requests.
- //
- // On AWS, if X doesn't exist, a HEAD or GET request
- // for X causes X's non-existence to be cached. Thus,
- // if we test for X, then create X and return a
- // signature to our client, the client might still get
- // 404 from all keepstores when trying to read it.
- //
- // To avoid this, we avoid doing HEAD X or GET X until
- // we know X has been written.
- //
- // Note that X might exist even though recent/X
- // doesn't: for example, the response to HEAD recent/X
- // might itself come from a stale cache. In such
- // cases, we will return a false negative and
- // PutHandler might needlessly create another replica
- // on a different volume. That's not ideal, but it's
- // better than passing the eventually-consistent
- // problem on to our clients.
- return v.translateError(err)
}
- rdr, err := v.getReaderWithContext(ctx, key)
- if err != nil {
- return err
- }
- defer rdr.Close()
- return v.translateError(compareReaderWithBuf(ctx, rdr, expect, loc[:32]))
+ return err
}
-// Put writes a block.
-func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- var opts s3.Options
- size := len(block)
- if size > 0 {
- md5, err := hex.DecodeString(loc)
- if err != nil {
- return err
- }
- opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
- // In AWS regions that use V4 signatures, we need to
- // provide ContentSHA256 up front. Otherwise, the S3
- // library reads the request body (from our buffer)
- // into another new buffer in order to compute the
- // SHA256 before sending the request -- which would
- // mean consuming 128 MiB of memory for the duration
- // of a 64 MiB write.
- opts.ContentSHA256 = fmt.Sprintf("%x", sha256.Sum256(block))
- }
-
- key := v.key(loc)
-
- // Send the block data through a pipe, so that (if we need to)
- // we can close the pipe early and abandon our PutReader()
- // goroutine, without worrying about PutReader() accessing our
- // block buffer after we release it.
- bufr, bufw := io.Pipe()
- go func() {
- io.Copy(bufw, bytes.NewReader(block))
- bufw.Close()
- }()
-
- var err error
- ready := make(chan bool)
- go func() {
- defer func() {
- if ctx.Err() != nil {
- v.logger.Debugf("abandoned PutReader goroutine finished with err: %s", err)
- }
- }()
- defer close(ready)
- err = v.bucket.PutReader(key, bufr, int64(size), "application/octet-stream", s3ACL, opts)
- if err != nil {
- return
- }
- err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
- }()
- select {
- case <-ctx.Done():
- v.logger.Debugf("taking PutReader's input away: %s", ctx.Err())
- // Our pipe might be stuck in Write(), waiting for
- // PutReader() to read. If so, un-stick it. This means
- // PutReader will get corrupt data, but that's OK: the
- // size and MD5 won't match, so the write will fail.
- go io.Copy(ioutil.Discard, bufr)
- // CloseWithError() will return once pending I/O is done.
- bufw.CloseWithError(ctx.Err())
- v.logger.Debugf("abandoning PutReader goroutine")
- return ctx.Err()
- case <-ready:
- // Unblock pipe in case PutReader did not consume it.
- io.Copy(ioutil.Discard, bufr)
- return v.translateError(err)
+// safeCopy calls CopyObjectRequest, and checks the response to make
+// sure the copy succeeded and updated the timestamp on the
+// destination object
+//
+// (If something goes wrong during the copy, the error will be
+// embedded in the 200 OK response)
+func (v *s3Volume) safeCopy(dst, src string) error {
+ input := &s3.CopyObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ ContentType: aws.String("application/octet-stream"),
+ CopySource: aws.String(v.bucket.bucket + "/" + src),
+ Key: aws.String(dst),
}
-}
-// Touch sets the timestamp for the given locator to the current time.
-func (v *S3Volume) Touch(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- key := v.key(loc)
- _, err := v.bucket.Head(key, nil)
- err = v.translateError(err)
- if os.IsNotExist(err) && v.fixRace(key) {
- // The data object got trashed in a race, but fixRace
- // rescued it.
- } else if err != nil {
- return err
- }
- err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
- return v.translateError(err)
-}
+ req := v.bucket.svc.CopyObjectRequest(input)
+ resp, err := req.Send(context.Background())
-// Mtime returns the stored timestamp for the given locator.
-func (v *S3Volume) Mtime(loc string) (time.Time, error) {
- key := v.key(loc)
- _, err := v.bucket.Head(key, nil)
- if err != nil {
- return zeroTime, v.translateError(err)
- }
- resp, err := v.bucket.Head("recent/"+key, nil)
err = v.translateError(err)
if os.IsNotExist(err) {
- // The data object X exists, but recent/X is missing.
- err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
- if err != nil {
- v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
- return zeroTime, v.translateError(err)
- }
- v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+key)
- resp, err = v.bucket.Head("recent/"+key, nil)
- if err != nil {
- v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
- return zeroTime, v.translateError(err)
- }
- } else if err != nil {
- // HEAD recent/X failed for some other reason.
- return zeroTime, err
- }
- return v.lastModified(resp)
-}
-
-// IndexTo writes a complete list of locators with the given prefix
-// for which Get() can retrieve data.
-func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
- // Use a merge sort to find matching sets of X and recent/X.
- dataL := s3Lister{
- Logger: v.logger,
- Bucket: v.bucket.Bucket(),
- Prefix: v.key(prefix),
- PageSize: v.IndexPageSize,
- Stats: &v.bucket.stats,
- }
- recentL := s3Lister{
- Logger: v.logger,
- Bucket: v.bucket.Bucket(),
- Prefix: "recent/" + v.key(prefix),
- PageSize: v.IndexPageSize,
- Stats: &v.bucket.stats,
- }
- for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
- if data.Key >= "g" {
- // Conveniently, "recent/*" and "trash/*" are
- // lexically greater than all hex-encoded data
- // hashes, so stopping here avoids iterating
- // over all of them needlessly with dataL.
- break
- }
- loc, isBlk := v.isKeepBlock(data.Key)
- if !isBlk {
- continue
- }
-
- // stamp is the list entry we should use to report the
- // last-modified time for this data block: it will be
- // the recent/X entry if one exists, otherwise the
- // entry for the data block itself.
- stamp := data
-
- // Advance to the corresponding recent/X marker, if any
- for recent != nil && recentL.Error() == nil {
- if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
- recent = recentL.Next()
- continue
- } else if cmp == 0 {
- stamp = recent
- recent = recentL.Next()
- break
- } else {
- // recent/X marker is missing: we'll
- // use the timestamp on the data
- // object.
- break
- }
- }
- if err := recentL.Error(); err != nil {
- return err
- }
- t, err := time.Parse(time.RFC3339, stamp.LastModified)
- if err != nil {
- return err
- }
- // We truncate sub-second precision here. Otherwise
- // timestamps will never match the RFC1123-formatted
- // Last-Modified values parsed by Mtime().
- fmt.Fprintf(writer, "%s+%d %d\n", loc, data.Size, t.Unix()*1000000000)
- }
- return dataL.Error()
-}
-
-// Trash a Keep block.
-func (v *S3Volume) Trash(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- if t, err := v.Mtime(loc); err != nil {
return err
- } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
- return nil
- }
- key := v.key(loc)
- if v.cluster.Collections.BlobTrashLifetime == 0 {
- if !v.UnsafeDelete {
- return ErrS3TrashDisabled
- }
- return v.translateError(v.bucket.Del(key))
- }
- err := v.checkRaceWindow(key)
- if err != nil {
- return err
- }
- err = v.safeCopy("trash/"+key, key)
- if err != nil {
- return err
- }
- return v.translateError(v.bucket.Del(key))
-}
-
-// checkRaceWindow returns a non-nil error if trash/key is, or might
-// be, in the race window (i.e., it's not safe to trash key).
-func (v *S3Volume) checkRaceWindow(key string) error {
- resp, err := v.bucket.Head("trash/"+key, nil)
- err = v.translateError(err)
- if os.IsNotExist(err) {
- // OK, trash/X doesn't exist so we're not in the race
- // window
- return nil
} else if err != nil {
- // Error looking up trash/X. We don't know whether
- // we're in the race window
- return err
+		return fmt.Errorf("PutCopy(%q → %q): %s", dst, v.bucket.bucket+"/"+src, err)
}
- t, err := v.lastModified(resp)
- if err != nil {
- // Can't parse timestamp
- return err
- }
- safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
- if safeWindow <= 0 {
- // We can't count on "touch trash/X" to prolong
- // trash/X's lifetime. The new timestamp might not
- // become visible until now+raceWindow, and EmptyTrash
- // is allowed to delete trash/X before then.
- return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
- }
- // trash/X exists, but it won't be eligible for deletion until
- // after now+raceWindow, so it's safe to overwrite it.
- return nil
-}
-// safeCopy calls PutCopy, and checks the response to make sure the
-// copy succeeded and updated the timestamp on the destination object
-// (PutCopy returns 200 OK if the request was received, even if the
-// copy failed).
-func (v *S3Volume) safeCopy(dst, src string) error {
- resp, err := v.bucket.Bucket().PutCopy(dst, s3ACL, s3.CopyOptions{
- ContentType: "application/octet-stream",
- MetadataDirective: "REPLACE",
- }, v.bucket.Bucket().Name+"/"+src)
- err = v.translateError(err)
- if os.IsNotExist(err) {
- return err
- } else if err != nil {
-		return fmt.Errorf("PutCopy(%q → %q): %s", dst, v.bucket.Bucket().Name+"/"+src, err)
- }
- if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
- return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
- } else if time.Now().Sub(t) > maxClockSkew {
- return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
+ if resp.CopyObjectResult.LastModified == nil {
+ return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.CopyObjectResult.LastModified, err)
+ } else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
+ return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.CopyObjectResult.LastModified, resp.CopyObjectResult.LastModified)
}
return nil
}
-// Get the LastModified header from resp, and parse it as RFC1123 or
-// -- if it isn't valid RFC1123 -- as Amazon's variant of RFC1123.
-func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
- s := resp.Header.Get("Last-Modified")
- t, err = time.Parse(time.RFC1123, s)
- if err != nil && s != "" {
- // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
- // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
- // as required by HTTP spec. If it's not a valid HTTP
- // header value, it's probably AWS (or s3test) giving
- // us a nearly-RFC1123 timestamp.
- t, err = time.Parse(nearlyRFC1123, s)
+func (v *s3Volume) check(ec2metadataHostname string) error {
+ if v.Bucket == "" {
+ return errors.New("DriverParameters: Bucket must be provided")
}
- return
-}
-
-// Untrash moves block from trash back into store
-func (v *S3Volume) Untrash(loc string) error {
- key := v.key(loc)
- err := v.safeCopy(key, "trash/"+key)
- if err != nil {
- return err
+ if v.IndexPageSize == 0 {
+ v.IndexPageSize = 1000
}
- err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
- return v.translateError(err)
-}
-
-// Status returns a *VolumeStatus representing the current in-use
-// storage capacity and a fake available capacity that doesn't make
-// the volume seem full or nearly-full.
-func (v *S3Volume) Status() *VolumeStatus {
- return &VolumeStatus{
- DeviceNum: 1,
- BytesFree: BlockSize * 1000,
- BytesUsed: 1,
+ if v.RaceWindow < 0 {
+ return errors.New("DriverParameters: RaceWindow must not be negative")
}
-}
-
-// InternalStats returns bucket I/O and API call counters.
-func (v *S3Volume) InternalStats() interface{} {
- return &v.bucket.stats
-}
-
-// String implements fmt.Stringer.
-func (v *S3Volume) String() string {
- return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
-}
-var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
-
-func (v *S3Volume) isKeepBlock(s string) (string, bool) {
- if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
- s = s[v.PrefixLength+1:]
+ if v.V2Signature {
+ return errors.New("DriverParameters: V2Signature is not supported")
}
- return s, s3KeepBlockRegexp.MatchString(s)
-}
-// Return the key used for a given loc. If PrefixLength==0 then
-// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
-// "abc/abcdef0123", etc.
-func (v *S3Volume) key(loc string) string {
- if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
- return loc[:v.PrefixLength] + "/" + loc
- } else {
- return loc
- }
-}
+ defaultResolver := endpoints.NewDefaultResolver()
-// fixRace(X) is called when "recent/X" exists but "X" doesn't
-// exist. If the timestamps on "recent/X" and "trash/X" indicate there
-// was a race between Put and Trash, fixRace recovers from the race by
-// Untrashing the block.
-func (v *S3Volume) fixRace(key string) bool {
- trash, err := v.bucket.Head("trash/"+key, nil)
- if err != nil {
- if !os.IsNotExist(v.translateError(err)) {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
+ cfg := defaults.Config()
+
+ if v.Endpoint == "" && v.Region == "" {
+ return fmt.Errorf("AWS region or endpoint must be specified")
+ } else if v.Endpoint != "" || ec2metadataHostname != "" {
+ myCustomResolver := func(service, region string) (aws.Endpoint, error) {
+ if v.Endpoint != "" && service == "s3" {
+ return aws.Endpoint{
+ URL: v.Endpoint,
+ SigningRegion: region,
+ }, nil
+ } else if service == "ec2metadata" && ec2metadataHostname != "" {
+ return aws.Endpoint{
+ URL: ec2metadataHostname,
+ }, nil
+ } else {
+ return defaultResolver.ResolveEndpoint(service, region)
+ }
}
- return false
+ cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
}
- trashTime, err := v.lastModified(trash)
- if err != nil {
- v.logger.WithError(err).Errorf("fixRace: error parsing time %q", trash.Header.Get("Last-Modified"))
- return false
+ if v.Region == "" {
+ // Endpoint is already specified (otherwise we would
+ // have errored out above), but Region is also
+ // required by the aws sdk, in order to determine
+ // SignatureVersions.
+ v.Region = "us-east-1"
}
+ cfg.Region = v.Region
- recent, err := v.bucket.Head("recent/"+key, nil)
- if err != nil {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
- return false
+ // Zero timeouts mean "wait forever", which is a bad
+ // default. Default to long timeouts instead.
+ if v.ConnectTimeout == 0 {
+ v.ConnectTimeout = s3DefaultConnectTimeout
}
- recentTime, err := v.lastModified(recent)
- if err != nil {
- v.logger.WithError(err).Errorf("fixRace: error parsing time %q", recent.Header.Get("Last-Modified"))
- return false
+ if v.ReadTimeout == 0 {
+ v.ReadTimeout = s3DefaultReadTimeout
}
- ageWhenTrashed := trashTime.Sub(recentTime)
- if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
- // No evidence of a race: block hasn't been written
- // since it became eligible for Trash. No fix needed.
- return false
- }
+ creds := aws.NewChainProvider(
+ []aws.CredentialsProvider{
+ aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
+ ec2rolecreds.New(ec2metadata.New(cfg), func(opts *ec2rolecreds.ProviderOptions) {
+ // (from aws-sdk-go-v2 comments)
+ // "allow the credentials to trigger
+ // refreshing prior to the credentials
+ // actually expiring. This is
+ // beneficial so race conditions with
+ // expiring credentials do not cause
+ // request to fail unexpectedly due to
+ // ExpiredTokenException exceptions."
+ //
+ // (from
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+ // "We make new credentials available
+ // at least five minutes before the
+ // expiration of the old credentials."
+ opts.ExpiryWindow = 5 * time.Minute
+ }),
+ })
- v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
- v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
- err = v.safeCopy(key, "trash/"+key)
- if err != nil {
- v.logger.WithError(err).Error("fixRace: copy failed")
- return false
+ cfg.Credentials = creds
+
+ v.bucket = &s3Bucket{
+ bucket: v.Bucket,
+ svc: s3.New(cfg),
}
- return true
+
+ // Set up prometheus metrics
+ lbls := prometheus.Labels{"device_id": v.DeviceID()}
+ v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
+
+ return nil
}
-func (v *S3Volume) translateError(err error) error {
- switch err := err.(type) {
- case *s3.Error:
- if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") ||
- strings.Contains(err.Error(), "Not Found") {
- return os.ErrNotExist
- }
- // Other 404 errors like NoSuchVersion and
- // NoSuchBucket are different problems which should
- // get called out downstream, so we don't convert them
- // to os.ErrNotExist.
- }
- return err
+// DeviceID returns a globally unique ID for the storage bucket.
+func (v *s3Volume) DeviceID() string {
+ return "s3://" + v.Endpoint + "/" + v.Bucket
}
// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
-func (v *S3Volume) EmptyTrash() {
- if v.cluster.Collections.BlobDeleteConcurrency < 1 {
- return
- }
-
+func (v *s3Volume) EmptyTrash() {
var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
// Define "ready to delete" as "...when EmptyTrash started".
startT := time.Now()
- emptyOneKey := func(trash *s3.Key) {
- key := trash.Key[6:]
- loc, isBlk := v.isKeepBlock(key)
- if !isBlk {
+ emptyOneKey := func(trash *s3.Object) {
+ key := strings.TrimPrefix(*trash.Key, "trash/")
+ loc, isblk := v.isKeepBlock(key)
+ if !isblk {
return
}
- atomic.AddInt64(&bytesInTrash, trash.Size)
+ atomic.AddInt64(&bytesInTrash, *trash.Size)
atomic.AddInt64(&blocksInTrash, 1)
- trashT, err := time.Parse(time.RFC3339, trash.LastModified)
- if err != nil {
- v.logger.Warnf("EmptyTrash: %q: parse %q: %s", trash.Key, trash.LastModified, err)
- return
- }
- recent, err := v.bucket.Head("recent/"+key, nil)
+ trashT := *trash.LastModified
+ recent, err := v.head("recent/" + key)
if err != nil && os.IsNotExist(v.translateError(err)) {
- v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err)
- err = v.Untrash(loc)
+ v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
+ err = v.BlockUntrash(loc)
if err != nil {
v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
}
@@ -851,14 +285,9 @@ func (v *S3Volume) EmptyTrash() {
v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
return
}
- recentT, err := v.lastModified(recent)
- if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: %q: error parsing %q", "recent/"+key, recent.Header.Get("Last-Modified"))
- return
- }
- if trashT.Sub(recentT) < v.cluster.Collections.BlobSigningTTL.Duration() {
- if age := startT.Sub(recentT); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
- // recent/loc is too old to protect
+ if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
+ if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
+ // recent/key is too old to protect
// loc from being Trashed again during
// the raceWindow that starts if we
// delete trash/X now.
@@ -868,10 +297,10 @@ func (v *S3Volume) EmptyTrash() {
// necessary to avoid starvation.
v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
v.fixRace(key)
- v.Touch(loc)
+ v.BlockTouch(loc)
return
}
- _, err := v.bucket.Head(key, nil)
+ _, err := v.head(key)
if os.IsNotExist(err) {
v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
v.fixRace(key)
@@ -884,17 +313,17 @@ func (v *S3Volume) EmptyTrash() {
if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
return
}
- err = v.bucket.Del(trash.Key)
+ err = v.bucket.Del(*trash.Key)
if err != nil {
- v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", trash.Key)
+ v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
return
}
- atomic.AddInt64(&bytesDeleted, trash.Size)
+ atomic.AddInt64(&bytesDeleted, *trash.Size)
atomic.AddInt64(&blocksDeleted, 1)
- _, err = v.bucket.Head(key, nil)
+ _, err = v.head(*trash.Key)
if err == nil {
- v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", key, key)
+ v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
return
}
if !os.IsNotExist(v.translateError(err)) {
@@ -908,7 +337,7 @@ func (v *S3Volume) EmptyTrash() {
}
var wg sync.WaitGroup
- todo := make(chan *s3.Key, v.cluster.Collections.BlobDeleteConcurrency)
+ todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
wg.Add(1)
go func() {
@@ -919,9 +348,9 @@ func (v *S3Volume) EmptyTrash() {
}()
}
- trashL := s3Lister{
+ trashL := s3awsLister{
Logger: v.logger,
- Bucket: v.bucket.Bucket(),
+ Bucket: v.bucket,
Prefix: "trash/",
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
@@ -935,23 +364,193 @@ func (v *S3Volume) EmptyTrash() {
if err := trashL.Error(); err != nil {
v.logger.WithError(err).Error("EmptyTrash: lister failed")
}
- v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+}
+
+// fixRace(X) is called when "recent/X" exists but "X" doesn't
+// exist. If the timestamps on "recent/X" and "trash/X" indicate there
+// was a race between Put and Trash, fixRace recovers from the race by
+// Untrashing the block.
+func (v *s3Volume) fixRace(key string) bool {
+ trash, err := v.head("trash/" + key)
+ if err != nil {
+ if !os.IsNotExist(v.translateError(err)) {
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
+ }
+ return false
+ }
+
+ recent, err := v.head("recent/" + key)
+ if err != nil {
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
+ return false
+ }
+
+ recentTime := *recent.LastModified
+ trashTime := *trash.LastModified
+ ageWhenTrashed := trashTime.Sub(recentTime)
+ if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
+ // No evidence of a race: block hasn't been written
+ // since it became eligible for Trash. No fix needed.
+ return false
+ }
+
+ v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
+ v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
+ err = v.safeCopy(key, "trash/"+key)
+ if err != nil {
+ v.logger.WithError(err).Error("fixRace: copy failed")
+ return false
+ }
+ return true
+}
+
+func (v *s3Volume) head(key string) (result *s3.HeadObjectOutput, err error) {
+ input := &s3.HeadObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ }
+
+ req := v.bucket.svc.HeadObjectRequest(input)
+ res, err := req.Send(context.TODO())
+
+ v.bucket.stats.TickOps("head")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
+ v.bucket.stats.TickErr(err)
+
+ if err != nil {
+ return nil, v.translateError(err)
+ }
+ result = res.HeadObjectOutput
+ return
+}
+
+// BlockRead reads a Keep block that has been stored as a block blob
+// in the S3 bucket.
+func (v *s3Volume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+ key := v.key(hash)
+ err := v.readWorker(ctx, key, w)
+ if err != nil {
+ err = v.translateError(err)
+ if !os.IsNotExist(err) {
+ return err
+ }
+
+ _, err = v.head("recent/" + key)
+ err = v.translateError(err)
+ if err != nil {
+ // If we can't read recent/X, there's no point in
+ // trying fixRace. Give up.
+ return err
+ }
+ if !v.fixRace(key) {
+ err = os.ErrNotExist
+ return err
+ }
+
+ err = v.readWorker(ctx, key, w)
+ if err != nil {
+ v.logger.Warnf("reading %s after successful fixRace: %s", hash, err)
+ err = v.translateError(err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (v *s3Volume) readWorker(ctx context.Context, key string, dst io.WriterAt) error {
+ downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
+ u.PartSize = s3downloaderPartSize
+ u.Concurrency = s3downloaderReadConcurrency
+ })
+ count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ })
+ v.bucket.stats.TickOps("get")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
+ v.bucket.stats.TickErr(err)
+ v.bucket.stats.TickInBytes(uint64(count))
+ return v.translateError(err)
+}
+
+func (v *s3Volume) writeObject(ctx context.Context, key string, r io.Reader) error {
+ if r == nil {
+ // r == nil leads to a memory violation in func readFillBuf in
+ // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
+ r = bytes.NewReader(nil)
+ }
+
+ uploadInput := s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ Body: r,
+ }
+
+ if loc, ok := v.isKeepBlock(key); ok {
+ var contentMD5 string
+ md5, err := hex.DecodeString(loc)
+ if err != nil {
+ return v.translateError(err)
+ }
+ contentMD5 = base64.StdEncoding.EncodeToString(md5)
+ uploadInput.ContentMD5 = &contentMD5
+ }
+
+ // Experimentation indicated that using concurrency 5 yields the best
+ // throughput, better than higher concurrency (10 or 13) by ~5%.
+ // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
+ // is detrimental to throughput (minus ~15%).
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
+ u.PartSize = s3uploaderPartSize
+ u.Concurrency = s3uploaderWriteConcurrency
+ })
+
+ // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
+ // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
+ // block, so there is no extra memory use to be concerned about. See
+ // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
+ // calculating the Sha-256 because we don't need it; we already use md5sum
+ // hashes that match the name of the block.
+ _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
+ r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
+ }))
+
+ v.bucket.stats.TickOps("put")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
+ v.bucket.stats.TickErr(err)
+
+ return v.translateError(err)
+}
+
+// BlockWrite writes a block.
+func (v *s3Volume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+ // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
+ // sdk to avoid memory allocation there. See #17339 for more information.
+ rdr := bytes.NewReader(data)
+ r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+ key := v.key(hash)
+ err := v.writeObject(ctx, key, r)
+ if err != nil {
+ return err
+ }
+ return v.writeObject(ctx, "recent/"+key, nil)
}
-type s3Lister struct {
- Logger logrus.FieldLogger
- Bucket *s3.Bucket
- Prefix string
- PageSize int
- Stats *s3bucketStats
- nextMarker string
- buf []s3.Key
- err error
+type s3awsLister struct {
+ Logger logrus.FieldLogger
+ Bucket *s3Bucket
+ Prefix string
+ PageSize int
+ Stats *s3awsbucketStats
+ ContinuationToken string
+ buf []s3.Object
+ err error
}
// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
-func (lister *s3Lister) First() *s3.Key {
+func (lister *s3awsLister) First() *s3.Object {
lister.getPage()
return lister.pop()
}
@@ -959,41 +558,65 @@ func (lister *s3Lister) First() *s3.Key {
// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error occurs.
-func (lister *s3Lister) Next() *s3.Key {
- if len(lister.buf) == 0 && lister.nextMarker != "" {
+func (lister *s3awsLister) Next() *s3.Object {
+ if len(lister.buf) == 0 && lister.ContinuationToken != "" {
lister.getPage()
}
return lister.pop()
}
// Return the most recent error encountered by First or Next.
-func (lister *s3Lister) Error() error {
+func (lister *s3awsLister) Error() error {
return lister.err
}
-func (lister *s3Lister) getPage() {
+func (lister *s3awsLister) getPage() {
lister.Stats.TickOps("list")
lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
- resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
- lister.nextMarker = ""
+
+ var input *s3.ListObjectsV2Input
+ if lister.ContinuationToken == "" {
+ input = &s3.ListObjectsV2Input{
+ Bucket: aws.String(lister.Bucket.bucket),
+ MaxKeys: aws.Int64(int64(lister.PageSize)),
+ Prefix: aws.String(lister.Prefix),
+ }
+ } else {
+ input = &s3.ListObjectsV2Input{
+ Bucket: aws.String(lister.Bucket.bucket),
+ MaxKeys: aws.Int64(int64(lister.PageSize)),
+ Prefix: aws.String(lister.Prefix),
+ ContinuationToken: &lister.ContinuationToken,
+ }
+ }
+
+ req := lister.Bucket.svc.ListObjectsV2Request(input)
+ resp, err := req.Send(context.Background())
if err != nil {
- lister.err = err
+ if aerr, ok := err.(awserr.Error); ok {
+ lister.err = aerr
+ } else {
+ lister.err = err
+ }
return
}
- if resp.IsTruncated {
- lister.nextMarker = resp.NextMarker
+
+ if *resp.IsTruncated {
+ lister.ContinuationToken = *resp.NextContinuationToken
+ } else {
+ lister.ContinuationToken = ""
}
- lister.buf = make([]s3.Key, 0, len(resp.Contents))
+ lister.buf = make([]s3.Object, 0, len(resp.Contents))
for _, key := range resp.Contents {
- if !strings.HasPrefix(key.Key, lister.Prefix) {
- lister.Logger.Warnf("s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
+ if !strings.HasPrefix(*key.Key, lister.Prefix) {
+ lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
continue
}
lister.buf = append(lister.buf, key)
}
}
-func (lister *s3Lister) pop() (k *s3.Key) {
+func (lister *s3awsLister) pop() (k *s3.Object) {
if len(lister.buf) > 0 {
k = &lister.buf[0]
lister.buf = lister.buf[1:]
@@ -1001,71 +624,201 @@ func (lister *s3Lister) pop() (k *s3.Key) {
return
}
-// s3bucket wraps s3.bucket and counts I/O and API usage stats. The
-// wrapped bucket can be replaced atomically with SetBucket in order
-// to update credentials.
-type s3bucket struct {
- bucket *s3.Bucket
- stats s3bucketStats
- mu sync.Mutex
-}
+// Index writes a complete list of locators with the given prefix
+// for which BlockRead() can retrieve data.
+func (v *s3Volume) Index(ctx context.Context, prefix string, writer io.Writer) error {
+ prefix = v.key(prefix)
+ // Use a merge sort to find matching sets of X and recent/X.
+ dataL := s3awsLister{
+ Logger: v.logger,
+ Bucket: v.bucket,
+ Prefix: prefix,
+ PageSize: v.IndexPageSize,
+ Stats: &v.bucket.stats,
+ }
+ recentL := s3awsLister{
+ Logger: v.logger,
+ Bucket: v.bucket,
+ Prefix: "recent/" + prefix,
+ PageSize: v.IndexPageSize,
+ Stats: &v.bucket.stats,
+ }
+ for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if *data.Key >= "g" {
+ // Conveniently, "recent/*" and "trash/*" are
+ // lexically greater than all hex-encoded data
+ // hashes, so stopping here avoids iterating
+ // over all of them needlessly with dataL.
+ break
+ }
+ loc, isblk := v.isKeepBlock(*data.Key)
+ if !isblk {
+ continue
+ }
+
+ // stamp is the list entry we should use to report the
+ // last-modified time for this data block: it will be
+ // the recent/X entry if one exists, otherwise the
+ // entry for the data block itself.
+ stamp := data
-func (b *s3bucket) Bucket() *s3.Bucket {
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.bucket
+ // Advance to the corresponding recent/X marker, if any
+ for recent != nil && recentL.Error() == nil {
+ if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
+ recent = recentL.Next()
+ continue
+ } else if cmp == 0 {
+ stamp = recent
+ recent = recentL.Next()
+ break
+ } else {
+ // recent/X marker is missing: we'll
+ // use the timestamp on the data
+ // object.
+ break
+ }
+ }
+ if err := recentL.Error(); err != nil {
+ return err
+ }
+ // We truncate sub-second precision here. Otherwise
+ // timestamps will never match the RFC1123-formatted
+ // Last-Modified values parsed by Mtime().
+ fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
+ }
+ return dataL.Error()
}
-func (b *s3bucket) SetBucket(bucket *s3.Bucket) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.bucket = bucket
+// Mtime returns the stored timestamp for the given locator.
+func (v *s3Volume) Mtime(loc string) (time.Time, error) {
+ key := v.key(loc)
+ _, err := v.head(key)
+ if err != nil {
+ return s3AWSZeroTime, v.translateError(err)
+ }
+ resp, err := v.head("recent/" + key)
+ err = v.translateError(err)
+ if os.IsNotExist(err) {
+ // The data object X exists, but recent/X is missing.
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ if err != nil {
+ v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
+ return s3AWSZeroTime, v.translateError(err)
+ }
+ v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
+ resp, err = v.head("recent/" + key)
+ if err != nil {
+ v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
+ return s3AWSZeroTime, v.translateError(err)
+ }
+ } else if err != nil {
+ // HEAD recent/X failed for some other reason.
+ return s3AWSZeroTime, err
+ }
+ return *resp.LastModified, err
}
-func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
- rdr, err := b.Bucket().GetReader(path)
- b.stats.TickOps("get")
- b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
- b.stats.TickErr(err)
- return NewCountingReader(rdr, b.stats.TickInBytes), err
+// InternalStats returns bucket I/O and API call counters.
+func (v *s3Volume) InternalStats() interface{} {
+ return &v.bucket.stats
}
-func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- resp, err := b.Bucket().Head(path, headers)
- b.stats.TickOps("head")
- b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
- b.stats.TickErr(err)
- return resp, err
+// BlockTouch sets the timestamp for the given locator to the current time.
+func (v *s3Volume) BlockTouch(hash string) error {
+ key := v.key(hash)
+ _, err := v.head(key)
+ err = v.translateError(err)
+ if os.IsNotExist(err) && v.fixRace(key) {
+ // The data object got trashed in a race, but fixRace
+ // rescued it.
+ } else if err != nil {
+ return err
+ }
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ return v.translateError(err)
}
-func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
- if length == 0 {
- // goamz will only send Content-Length: 0 when reader
- // is nil due to net.http.Request.ContentLength
- // behavior. Otherwise, Content-Length header is
- // omitted which will cause some S3 services
- // (including AWS and Ceph RadosGW) to fail to create
- // empty objects.
- r = nil
- } else {
- r = NewCountingReader(r, b.stats.TickOutBytes)
+// checkRaceWindow returns a non-nil error if trash/key is, or might
+// be, in the race window (i.e., it's not safe to trash key).
+func (v *s3Volume) checkRaceWindow(key string) error {
+ resp, err := v.head("trash/" + key)
+ err = v.translateError(err)
+ if os.IsNotExist(err) {
+ // OK, trash/X doesn't exist so we're not in the race
+ // window
+ return nil
+ } else if err != nil {
+ // Error looking up trash/X. We don't know whether
+ // we're in the race window
+ return err
}
- err := b.Bucket().PutReader(path, r, length, contType, perm, options)
- b.stats.TickOps("put")
- b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.TickErr(err)
- return err
+ t := resp.LastModified
+ safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
+ if safeWindow <= 0 {
+ // We can't count on "touch trash/X" to prolong
+ // trash/X's lifetime. The new timestamp might not
+ // become visible until now+raceWindow, and EmptyTrash
+ // is allowed to delete trash/X before then.
+ return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
+ }
+ // trash/X exists, but it won't be eligible for deletion until
+ // after now+raceWindow, so it's safe to overwrite it.
+ return nil
}
-func (b *s3bucket) Del(path string) error {
- err := b.Bucket().Del(path)
+func (b *s3Bucket) Del(path string) error {
+ input := &s3.DeleteObjectInput{
+ Bucket: aws.String(b.bucket),
+ Key: aws.String(path),
+ }
+ req := b.svc.DeleteObjectRequest(input)
+ _, err := req.Send(context.Background())
b.stats.TickOps("delete")
b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
b.stats.TickErr(err)
return err
}
-type s3bucketStats struct {
+// BlockTrash trashes a Keep block.
+func (v *s3Volume) BlockTrash(loc string) error {
+ if t, err := v.Mtime(loc); err != nil {
+ return err
+ } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
+ return nil
+ }
+ key := v.key(loc)
+ if v.cluster.Collections.BlobTrashLifetime == 0 {
+ if !v.UnsafeDelete {
+ return errS3TrashDisabled
+ }
+ return v.translateError(v.bucket.Del(key))
+ }
+ err := v.checkRaceWindow(key)
+ if err != nil {
+ return err
+ }
+ err = v.safeCopy("trash/"+key, key)
+ if err != nil {
+ return err
+ }
+ return v.translateError(v.bucket.Del(key))
+}
+
+// BlockUntrash moves block from trash back into store
+func (v *s3Volume) BlockUntrash(hash string) error {
+ key := v.key(hash)
+ err := v.safeCopy(key, "trash/"+key)
+ if err != nil {
+ return err
+ }
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ return v.translateError(err)
+}
+
+type s3awsbucketStats struct {
statsTicker
Ops uint64
GetOps uint64
@@ -1075,13 +828,18 @@ type s3bucketStats struct {
ListOps uint64
}
-func (s *s3bucketStats) TickErr(err error) {
+func (s *s3awsbucketStats) TickErr(err error) {
if err == nil {
return
}
errType := fmt.Sprintf("%T", err)
- if err, ok := err.(*s3.Error); ok {
- errType = errType + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
+ if aerr, ok := err.(awserr.Error); ok {
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
+ } else {
+ errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
+ }
}
s.statsTicker.TickErr(err, errType)
}
diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index a820983568..fb68e1c057 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -19,39 +19,49 @@ import (
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/AdRoll/goamz/s3"
- "github.com/AdRoll/goamz/s3/s3test"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
+
+ "github.com/johannesboyne/gofakes3"
+ "github.com/johannesboyne/gofakes3/backend/s3mem"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
const (
- TestBucketName = "testbucket"
+ s3TestBucketName = "testbucket"
)
-type fakeClock struct {
+type s3AWSFakeClock struct {
now *time.Time
}
-func (c *fakeClock) Now() time.Time {
+func (c *s3AWSFakeClock) Now() time.Time {
if c.now == nil {
- return time.Now()
+ return time.Now().UTC()
}
- return *c.now
+ return c.now.UTC()
}
-var _ = check.Suite(&StubbedS3Suite{})
+func (c *s3AWSFakeClock) Since(t time.Time) time.Duration {
+ return c.Now().Sub(t)
+}
+
+var _ = check.Suite(&stubbedS3Suite{})
+
+var srv httptest.Server
-type StubbedS3Suite struct {
+type stubbedS3Suite struct {
s3server *httptest.Server
metadata *httptest.Server
cluster *arvados.Cluster
- handler *handler
- volumes []*TestableS3Volume
+ volumes []*testableS3Volume
}
-func (s *StubbedS3Suite) SetUpTest(c *check.C) {
+func (s *stubbedS3Suite) SetUpTest(c *check.C) {
s.s3server = nil
s.metadata = nil
s.cluster = testCluster(c)
@@ -59,36 +69,41 @@ func (s *StubbedS3Suite) SetUpTest(c *check.C) {
"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
"zzzzz-nyw5e-111111111111111": {Driver: "S3"},
}
- s.handler = &handler{}
}
-func (s *StubbedS3Suite) TestGeneric(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
+func (s *stubbedS3Suite) TestGeneric(c *check.C) {
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
// Use a negative raceWindow so s3test's 1-second
// timestamp precision doesn't confuse fixRace.
- return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+ return s.newTestableVolume(c, params, -2*time.Second)
})
}
-func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
- DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+func (s *stubbedS3Suite) TestGenericReadOnly(c *check.C) {
+ DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableVolume(c, params, -2*time.Second)
})
}
-func (s *StubbedS3Suite) TestGenericWithPrefix(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- v := s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+func (s *stubbedS3Suite) TestGenericWithPrefix(c *check.C) {
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+ v := s.newTestableVolume(c, params, -2*time.Second)
v.PrefixLength = 3
return v
})
}
-func (s *StubbedS3Suite) TestIndex(c *check.C) {
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 0)
+func (s *stubbedS3Suite) TestIndex(c *check.C) {
+ v := s.newTestableVolume(c, newVolumeParams{
+ Cluster: s.cluster,
+ ConfigVolume: arvados.Volume{Replication: 2},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ }, 0)
v.IndexPageSize = 3
for i := 0; i < 256; i++ {
- v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
+ err := v.blockWriteWithoutMD5Check(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
+ c.Assert(err, check.IsNil)
}
for _, spec := range []struct {
prefix string
@@ -100,7 +115,7 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
{"abc", 0},
} {
buf := new(bytes.Buffer)
- err := v.IndexTo(spec.prefix, buf)
+ err := v.Index(context.Background(), spec.prefix, buf)
c.Check(err, check.IsNil)
idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
@@ -109,15 +124,16 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
}
}
-func (s *StubbedS3Suite) TestSignatureVersion(c *check.C) {
+func (s *stubbedS3Suite) TestSignature(c *check.C) {
var header http.Header
stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
header = r.Header
}))
defer stub.Close()
- // Default V4 signature
- vol := S3Volume{
+ // The aws-sdk-go-v2 driver only supports S3 V4 signatures. S3 v2 signatures are being phased out
+ // as of June 24, 2020. Cf. https://forums.aws.amazon.com/ann.jspa?annID=5816
+ vol := s3Volume{
S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
AccessKeyID: "xxx",
SecretAccessKey: "xxx",
@@ -129,34 +145,17 @@ func (s *StubbedS3Suite) TestSignatureVersion(c *check.C) {
logger: ctxlog.TestLogger(c),
metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
}
- err := vol.check()
- c.Check(err, check.IsNil)
- err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
- c.Check(err, check.IsNil)
- c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
+ err := vol.check("")
+ // Our test S3 server uses the older 'Path Style'
+ vol.bucket.svc.ForcePathStyle = true
- // Force V2 signature
- vol = S3Volume{
- S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
- AccessKeyID: "xxx",
- SecretAccessKey: "xxx",
- Endpoint: stub.URL,
- Region: "test-region-1",
- Bucket: "test-bucket-name",
- V2Signature: true,
- },
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
- }
- err = vol.check()
c.Check(err, check.IsNil)
- err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+ err = vol.BlockWrite(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
c.Check(err, check.IsNil)
- c.Check(header.Get("Authorization"), check.Matches, `AWS xxx:.*`)
+ c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
}
-func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
+func (s *stubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
@@ -167,16 +166,28 @@ func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
}))
defer s.metadata.Close()
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
- c.Check(v.AccessKeyID, check.Equals, "ASIAIOSFODNN7EXAMPLE")
- c.Check(v.SecretAccessKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
- c.Check(v.bucket.bucket.S3.Auth.AccessKey, check.Equals, "ASIAIOSFODNN7EXAMPLE")
- c.Check(v.bucket.bucket.S3.Auth.SecretKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
+ v := &s3Volume{
+ S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+ IAMRole: s.metadata.URL + "/latest/api/token",
+ Endpoint: "http://localhost:12345",
+ Region: "test-region-1",
+ Bucket: "test-bucket-name",
+ },
+ cluster: s.cluster,
+ logger: ctxlog.TestLogger(c),
+ metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ }
+ err := v.check(s.metadata.URL + "/latest")
+ c.Check(err, check.IsNil)
+ creds, err := v.bucket.svc.Client.Config.Credentials.Retrieve(context.Background())
+ c.Check(err, check.IsNil)
+ c.Check(creds.AccessKeyID, check.Equals, "ASIAIOSFODNN7EXAMPLE")
+ c.Check(creds.SecretAccessKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
}))
- deadv := &S3Volume{
+ deadv := &s3Volume{
S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
IAMRole: s.metadata.URL + "/fake-metadata/test-role",
Endpoint: "http://localhost:12345",
@@ -187,13 +198,20 @@ func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
logger: ctxlog.TestLogger(c),
metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
}
- err := deadv.check()
- c.Check(err, check.ErrorMatches, `.*/fake-metadata/test-role.*`)
- c.Check(err, check.ErrorMatches, `.*404.*`)
+ err = deadv.check(s.metadata.URL + "/latest")
+ c.Check(err, check.IsNil)
+ _, err = deadv.bucket.svc.Client.Config.Credentials.Retrieve(context.Background())
+ c.Check(err, check.ErrorMatches, `(?s).*EC2RoleRequestError: no EC2 instance role found.*`)
+ c.Check(err, check.ErrorMatches, `(?s).*404.*`)
}
-func (s *StubbedS3Suite) TestStats(c *check.C) {
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+func (s *stubbedS3Suite) TestStats(c *check.C) {
+ v := s.newTestableVolume(c, newVolumeParams{
+ Cluster: s.cluster,
+ ConfigVolume: arvados.Volume{Replication: 2},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ }, 5*time.Minute)
stats := func() string {
buf, err := json.Marshal(v.InternalStats())
c.Check(err, check.IsNil)
@@ -203,30 +221,30 @@ func (s *StubbedS3Suite) TestStats(c *check.C) {
c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- _, err := v.Get(context.Background(), loc, make([]byte, 3))
+ err := v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.NotNil)
c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
- c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
+ c.Check(stats(), check.Matches, `.*"s3.requestFailure 404 NoSuchKey[^"]*":[^0].*`)
c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
- err = v.Put(context.Background(), loc, []byte("foo"))
+ err = v.BlockWrite(context.Background(), loc, []byte("foo"))
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
- _, err = v.Get(context.Background(), loc, make([]byte, 3))
+ err = v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.IsNil)
- _, err = v.Get(context.Background(), loc, make([]byte, 3))
+ err = v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
}
-type blockingHandler struct {
+type s3AWSBlockingHandler struct {
requested chan *http.Request
unblock chan struct{}
}
-func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *s3AWSBlockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method == "PUT" && !strings.Contains(strings.Trim(r.URL.Path, "/"), "/") {
// Accept PutBucket ("PUT /bucketname/"), called by
// newTestableVolume
@@ -241,40 +259,29 @@ func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, "nothing here", http.StatusNotFound)
}
-func (s *StubbedS3Suite) TestGetContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := make([]byte, 3)
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
- _, err := v.Get(ctx, loc, buf)
- return err
+func (s *stubbedS3Suite) TestGetContextCancel(c *check.C) {
+ s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
+ return v.BlockRead(ctx, fooHash, brdiscard)
})
}
-func (s *StubbedS3Suite) TestCompareContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := []byte("bar")
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
- return v.Compare(ctx, loc, buf)
+func (s *stubbedS3Suite) TestPutContextCancel(c *check.C) {
+ s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
+ return v.BlockWrite(ctx, fooHash, []byte("foo"))
})
}
-func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := []byte("foo")
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
- return v.Put(ctx, loc, buf)
- })
-}
-
-func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) {
- handler := &blockingHandler{}
+func (s *stubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *testableS3Volume) error) {
+ handler := &s3AWSBlockingHandler{}
s.s3server = httptest.NewServer(handler)
defer s.s3server.Close()
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+ v := s.newTestableVolume(c, newVolumeParams{
+ Cluster: s.cluster,
+ ConfigVolume: arvados.Volume{Replication: 2},
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ }, 5*time.Minute)
ctx, cancel := context.WithCancel(context.Background())
@@ -310,11 +317,17 @@ func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Con
}
}
-func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
+func (s *stubbedS3Suite) TestBackendStates(c *check.C) {
s.cluster.Collections.BlobTrashLifetime.Set("1h")
s.cluster.Collections.BlobSigningTTL.Set("1h")
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+ v := s.newTestableVolume(c, newVolumeParams{
+ Cluster: s.cluster,
+ ConfigVolume: arvados.Volume{Replication: 2},
+ Logger: ctxlog.TestLogger(c),
+ MetricsVecs: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ BufferPool: newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+ }, 5*time.Minute)
var none time.Time
putS3Obj := func(t time.Time, key string, data []byte) {
@@ -322,7 +335,20 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
return
}
v.serverClock.now = &t
- v.bucket.Bucket().Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
+ _, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ Body: bytes.NewReader(data),
+ })
+ if err != nil {
+ panic(err)
+ }
+ v.serverClock.now = nil
+ _, err = v.head(key)
+ if err != nil {
+ panic(err)
+ }
}
t0 := time.Now()
@@ -443,7 +469,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
if prefixLength > 0 {
key = loc[:prefixLength] + "/" + loc
}
- c.Log("\t", loc)
+ c.Log("\t", loc, "\t", key)
putS3Obj(scenario.dataT, key, blk)
putS3Obj(scenario.recentT, "recent/"+key, nil)
putS3Obj(scenario.trashT, "trash/"+key, blk)
@@ -453,8 +479,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
// Check canGet
loc, blk := setupScenario()
- buf := make([]byte, len(blk))
- _, err := v.Get(context.Background(), loc, buf)
+ err := v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err == nil, check.Equals, scenario.canGet)
if err != nil {
c.Check(os.IsNotExist(err), check.Equals, true)
@@ -462,9 +487,9 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
// Call Trash, then check canTrash and canGetAfterTrash
loc, _ = setupScenario()
- err = v.Trash(loc)
+ err = v.BlockTrash(loc)
c.Check(err == nil, check.Equals, scenario.canTrash)
- _, err = v.Get(context.Background(), loc, buf)
+ err = v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
if err != nil {
c.Check(os.IsNotExist(err), check.Equals, true)
@@ -472,14 +497,14 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
// Call Untrash, then check canUntrash
loc, _ = setupScenario()
- err = v.Untrash(loc)
+ err = v.BlockUntrash(loc)
c.Check(err == nil, check.Equals, scenario.canUntrash)
if scenario.dataT != none || scenario.trashT != none {
// In all scenarios where the data exists, we
// should be able to Get after Untrash --
// regardless of timestamps, errors, race
// conditions, etc.
- _, err = v.Get(context.Background(), loc, buf)
+ err = v.BlockRead(context.Background(), loc, brdiscard)
c.Check(err, check.IsNil)
}
@@ -487,7 +512,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
// freshAfterEmpty
loc, _ = setupScenario()
v.EmptyTrash()
- _, err = v.bucket.Head("trash/"+v.key(loc), nil)
+ _, err = v.head("trash/" + v.key(loc))
c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
if scenario.freshAfterEmpty {
t, err := v.Mtime(loc)
@@ -500,7 +525,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
// Check for current Mtime after Put (applies to all
// scenarios)
loc, blk = setupScenario()
- err = v.Put(context.Background(), loc, blk)
+ err = v.BlockWrite(context.Background(), loc, blk)
c.Check(err, check.IsNil)
t, err := v.Mtime(loc)
c.Check(err, check.IsNil)
@@ -509,18 +534,44 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
}
}
-type TestableS3Volume struct {
- *S3Volume
- server *s3test.Server
+type testableS3Volume struct {
+ *s3Volume
+ server *httptest.Server
c *check.C
- serverClock *fakeClock
+ serverClock *s3AWSFakeClock
}
-func (s *StubbedS3Suite) newTestableVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, raceWindow time.Duration) *TestableS3Volume {
- clock := &fakeClock{}
- srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
- c.Assert(err, check.IsNil)
- endpoint := srv.URL()
+type LogrusLog struct {
+ log *logrus.FieldLogger
+}
+
+func (l LogrusLog) Print(level gofakes3.LogLevel, v ...interface{}) {
+ switch level {
+ case gofakes3.LogErr:
+ (*l.log).Errorln(v...)
+ case gofakes3.LogWarn:
+ (*l.log).Warnln(v...)
+ case gofakes3.LogInfo:
+ (*l.log).Infoln(v...)
+ default:
+ panic("unknown level")
+ }
+}
+
+func (s *stubbedS3Suite) newTestableVolume(c *check.C, params newVolumeParams, raceWindow time.Duration) *testableS3Volume {
+
+ clock := &s3AWSFakeClock{}
+ // fake s3
+ backend := s3mem.New(s3mem.WithTimeSource(clock))
+
+ // To enable GoFakeS3 debug logging, pass logger to gofakes3.WithLogger()
+ /* logger := new(LogrusLog)
+ ctxLogger := ctxlog.FromContext(context.Background())
+ logger.log = &ctxLogger */
+ faker := gofakes3.New(backend, gofakes3.WithTimeSource(clock), gofakes3.WithLogger(nil), gofakes3.WithTimeSkewLimit(0))
+ srv := httptest.NewServer(faker.Server())
+
+ endpoint := srv.URL
if s.s3server != nil {
endpoint = s.s3server.URL
}
@@ -530,65 +581,96 @@ func (s *StubbedS3Suite) newTestableVolume(c *check.C, cluster *arvados.Cluster,
iamRole, accessKey, secretKey = s.metadata.URL+"/fake-metadata/test-role", "", ""
}
- v := &TestableS3Volume{
- S3Volume: &S3Volume{
+ v := &testableS3Volume{
+ s3Volume: &s3Volume{
S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
IAMRole: iamRole,
AccessKeyID: accessKey,
SecretAccessKey: secretKey,
- Bucket: TestBucketName,
+ Bucket: s3TestBucketName,
Endpoint: endpoint,
Region: "test-region-1",
LocationConstraint: true,
UnsafeDelete: true,
IndexPageSize: 1000,
},
- cluster: cluster,
- volume: volume,
- logger: ctxlog.TestLogger(c),
- metrics: metrics,
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
+ logger: params.Logger,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
},
c: c,
server: srv,
serverClock: clock,
}
- c.Assert(v.S3Volume.check(), check.IsNil)
- c.Assert(v.bucket.Bucket().PutBucket(s3.ACL("private")), check.IsNil)
+ c.Assert(v.s3Volume.check(""), check.IsNil)
+ // Our test S3 server uses the older 'Path Style'
+ v.s3Volume.bucket.svc.ForcePathStyle = true
+ // Create the testbucket
+ input := &s3.CreateBucketInput{
+ Bucket: aws.String(s3TestBucketName),
+ }
+ req := v.s3Volume.bucket.svc.CreateBucketRequest(input)
+ _, err := req.Send(context.Background())
+ c.Assert(err, check.IsNil)
// We couldn't set RaceWindow until now because check()
// rejects negative values.
- v.S3Volume.RaceWindow = arvados.Duration(raceWindow)
+ v.s3Volume.RaceWindow = arvados.Duration(raceWindow)
return v
}
-// PutRaw skips the ContentMD5 test
-func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
+func (v *testableS3Volume) blockWriteWithoutMD5Check(loc string, block []byte) error {
key := v.key(loc)
- err := v.bucket.Bucket().Put(key, block, "application/octet-stream", s3ACL, s3.Options{})
- if err != nil {
- v.logger.Printf("PutRaw: %s: %+v", loc, err)
- }
- err = v.bucket.Bucket().Put("recent/"+key, nil, "application/octet-stream", s3ACL, s3.Options{})
+ r := newCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
+
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
+ u.PartSize = 5 * 1024 * 1024
+ u.Concurrency = 13
+ })
+
+ _, err := uploader.Upload(&s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ Body: r,
+ })
if err != nil {
- v.logger.Printf("PutRaw: recent/%s: %+v", key, err)
+ return err
}
+
+ empty := bytes.NewReader([]byte{})
+ _, err = uploader.Upload(&s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String("recent/" + key),
+ Body: empty,
+ })
+ return err
}
// TouchWithDate turns back the clock while doing a Touch(). We assume
// there are no other operations happening on the same s3test server
// while we do this.
-func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
+func (v *testableS3Volume) TouchWithDate(loc string, lastPut time.Time) {
v.serverClock.now = &lastPut
- err := v.bucket.Bucket().Put("recent/"+v.key(locator), nil, "application/octet-stream", s3ACL, s3.Options{})
+
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
+ empty := bytes.NewReader([]byte{})
+ _, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String("recent/" + v.key(loc)),
+ Body: empty,
+ })
if err != nil {
panic(err)
}
+
v.serverClock.now = nil
}
-func (v *TestableS3Volume) Teardown() {
- v.server.Quit()
+func (v *testableS3Volume) Teardown() {
+ v.server.Close()
}
-func (v *TestableS3Volume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableS3Volume) ReadWriteOperationLabelValues() (r, w string) {
return "get", "put"
}
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
deleted file mode 100644
index 6205da5beb..0000000000
--- a/services/keepstore/s3aws_volume.go
+++ /dev/null
@@ -1,909 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "os"
- "regexp"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/aws/awserr"
- "github.com/aws/aws-sdk-go-v2/aws/defaults"
- "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
- "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
- "github.com/aws/aws-sdk-go-v2/aws/endpoints"
- "github.com/aws/aws-sdk-go-v2/service/s3"
- "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
-)
-
-// S3AWSVolume implements Volume using an S3 bucket.
-type S3AWSVolume struct {
- arvados.S3VolumeDriverParameters
- AuthToken string // populated automatically when IAMRole is used
- AuthExpiration time.Time // populated automatically when IAMRole is used
-
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
- bucket *s3AWSbucket
- region string
- startOnce sync.Once
-}
-
-// s3bucket wraps s3.bucket and counts I/O and API usage stats. The
-// wrapped bucket can be replaced atomically with SetBucket in order
-// to update credentials.
-type s3AWSbucket struct {
- bucket string
- svc *s3.Client
- stats s3awsbucketStats
- mu sync.Mutex
-}
-
-// chooseS3VolumeDriver distinguishes between the old goamz driver and
-// aws-sdk-go based on the UseAWSS3v2Driver feature flag
-func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, v)
- if err != nil {
- return nil, err
- }
- if v.UseAWSS3v2Driver {
- logger.Debugln("Using AWS S3 v2 driver")
- return newS3AWSVolume(cluster, volume, logger, metrics)
- }
- logger.Debugln("Using goamz S3 driver")
- return newS3Volume(cluster, volume, logger, metrics)
-}
-
-const (
- PartSize = 5 * 1024 * 1024
- ReadConcurrency = 13
- WriteConcurrency = 5
-)
-
-var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
-var s3AWSZeroTime time.Time
-
-func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
- if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
- s = s[v.PrefixLength+1:]
- }
- return s, s3AWSKeepBlockRegexp.MatchString(s)
-}
-
-// Return the key used for a given loc. If PrefixLength==0 then
-// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
-// "abc/abcdef0123", etc.
-func (v *S3AWSVolume) key(loc string) string {
- if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
- return loc[:v.PrefixLength] + "/" + loc
- } else {
- return loc
- }
-}
-
-func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, v)
- if err != nil {
- return nil, err
- }
- v.logger = logger.WithField("Volume", v.String())
- return v, v.check("")
-}
-
-func (v *S3AWSVolume) translateError(err error) error {
- if _, ok := err.(*aws.RequestCanceledError); ok {
- return context.Canceled
- } else if aerr, ok := err.(awserr.Error); ok {
- if aerr.Code() == "NotFound" {
- return os.ErrNotExist
- } else if aerr.Code() == "NoSuchKey" {
- return os.ErrNotExist
- }
- }
- return err
-}
-
-// safeCopy calls CopyObjectRequest, and checks the response to make
-// sure the copy succeeded and updated the timestamp on the
-// destination object
-//
-// (If something goes wrong during the copy, the error will be
-// embedded in the 200 OK response)
-func (v *S3AWSVolume) safeCopy(dst, src string) error {
- input := &s3.CopyObjectInput{
- Bucket: aws.String(v.bucket.bucket),
- ContentType: aws.String("application/octet-stream"),
- CopySource: aws.String(v.bucket.bucket + "/" + src),
- Key: aws.String(dst),
- }
-
- req := v.bucket.svc.CopyObjectRequest(input)
- resp, err := req.Send(context.Background())
-
- err = v.translateError(err)
- if os.IsNotExist(err) {
- return err
- } else if err != nil {
- return fmt.Errorf("PutCopy(%q â %q): %s", dst, v.bucket.bucket+"/"+src, err)
- }
-
- if resp.CopyObjectResult.LastModified == nil {
- return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.CopyObjectResult.LastModified, err)
- } else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
- return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.CopyObjectResult.LastModified, resp.CopyObjectResult.LastModified)
- }
- return nil
-}
-
-func (v *S3AWSVolume) check(ec2metadataHostname string) error {
- if v.Bucket == "" {
- return errors.New("DriverParameters: Bucket must be provided")
- }
- if v.IndexPageSize == 0 {
- v.IndexPageSize = 1000
- }
- if v.RaceWindow < 0 {
- return errors.New("DriverParameters: RaceWindow must not be negative")
- }
-
- if v.V2Signature {
- return errors.New("DriverParameters: V2Signature is not supported")
- }
-
- defaultResolver := endpoints.NewDefaultResolver()
-
- cfg := defaults.Config()
-
- if v.Endpoint == "" && v.Region == "" {
- return fmt.Errorf("AWS region or endpoint must be specified")
- } else if v.Endpoint != "" || ec2metadataHostname != "" {
- myCustomResolver := func(service, region string) (aws.Endpoint, error) {
- if v.Endpoint != "" && service == "s3" {
- return aws.Endpoint{
- URL: v.Endpoint,
- SigningRegion: v.Region,
- }, nil
- } else if service == "ec2metadata" && ec2metadataHostname != "" {
- return aws.Endpoint{
- URL: ec2metadataHostname,
- }, nil
- }
-
- return defaultResolver.ResolveEndpoint(service, region)
- }
- cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
- }
-
- cfg.Region = v.Region
-
- // Zero timeouts mean "wait forever", which is a bad
- // default. Default to long timeouts instead.
- if v.ConnectTimeout == 0 {
- v.ConnectTimeout = s3DefaultConnectTimeout
- }
- if v.ReadTimeout == 0 {
- v.ReadTimeout = s3DefaultReadTimeout
- }
-
- creds := aws.NewChainProvider(
- []aws.CredentialsProvider{
- aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
- ec2rolecreds.New(ec2metadata.New(cfg)),
- })
-
- cfg.Credentials = creds
-
- v.bucket = &s3AWSbucket{
- bucket: v.Bucket,
- svc: s3.New(cfg),
- }
-
- // Set up prometheus metrics
- lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
- v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
-
- return nil
-}
-
-// String implements fmt.Stringer.
-func (v *S3AWSVolume) String() string {
- return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
-}
-
-// GetDeviceID returns a globally unique ID for the storage bucket.
-func (v *S3AWSVolume) GetDeviceID() string {
- return "s3://" + v.Endpoint + "/" + v.Bucket
-}
-
-// Compare the given data with the stored data.
-func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
- key := v.key(loc)
- errChan := make(chan error, 1)
- go func() {
- _, err := v.head("recent/" + key)
- errChan <- err
- }()
- var err error
- select {
- case <-ctx.Done():
- return ctx.Err()
- case err = <-errChan:
- }
- if err != nil {
- // Checking for the key itself here would interfere
- // with future GET requests.
- //
- // On AWS, if X doesn't exist, a HEAD or GET request
- // for X causes X's non-existence to be cached. Thus,
- // if we test for X, then create X and return a
- // signature to our client, the client might still get
- // 404 from all keepstores when trying to read it.
- //
- // To avoid this, we avoid doing HEAD X or GET X until
- // we know X has been written.
- //
- // Note that X might exist even though recent/X
- // doesn't: for example, the response to HEAD recent/X
- // might itself come from a stale cache. In such
- // cases, we will return a false negative and
- // PutHandler might needlessly create another replica
- // on a different volume. That's not ideal, but it's
- // better than passing the eventually-consistent
- // problem on to our clients.
- return v.translateError(err)
- }
-
- input := &s3.GetObjectInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- }
-
- req := v.bucket.svc.GetObjectRequest(input)
- result, err := req.Send(ctx)
- if err != nil {
- return v.translateError(err)
- }
- return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
-}
-
-// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
-// and deletes them from the volume.
-func (v *S3AWSVolume) EmptyTrash() {
- if v.cluster.Collections.BlobDeleteConcurrency < 1 {
- return
- }
-
- var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
-
- // Define "ready to delete" as "...when EmptyTrash started".
- startT := time.Now()
-
- emptyOneKey := func(trash *s3.Object) {
- key := strings.TrimPrefix(*trash.Key, "trash/")
- loc, isblk := v.isKeepBlock(key)
- if !isblk {
- return
- }
- atomic.AddInt64(&bytesInTrash, *trash.Size)
- atomic.AddInt64(&blocksInTrash, 1)
-
- trashT := *trash.LastModified
- recent, err := v.head("recent/" + key)
- if err != nil && os.IsNotExist(v.translateError(err)) {
- v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
- err = v.Untrash(loc)
- if err != nil {
- v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
- }
- return
- } else if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
- return
- }
- if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
- if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
- // recent/key is too old to protect
- // loc from being Trashed again during
- // the raceWindow that starts if we
- // delete trash/X now.
- //
- // Note this means (TrashSweepInterval
- // < BlobSigningTTL - raceWindow) is
- // necessary to avoid starvation.
- v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
- v.fixRace(key)
- v.Touch(loc)
- return
- }
- _, err := v.head(key)
- if os.IsNotExist(err) {
- v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
- v.fixRace(key)
- return
- } else if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
- return
- }
- }
- if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
- return
- }
- err = v.bucket.Del(*trash.Key)
- if err != nil {
- v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
- return
- }
- atomic.AddInt64(&bytesDeleted, *trash.Size)
- atomic.AddInt64(&blocksDeleted, 1)
-
- _, err = v.head(*trash.Key)
- if err == nil {
- v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
- return
- }
- if !os.IsNotExist(v.translateError(err)) {
- v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
- return
- }
- err = v.bucket.Del("recent/" + key)
- if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
- }
- }
-
- var wg sync.WaitGroup
- todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
- for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for key := range todo {
- emptyOneKey(key)
- }
- }()
- }
-
- trashL := s3awsLister{
- Logger: v.logger,
- Bucket: v.bucket,
- Prefix: "trash/",
- PageSize: v.IndexPageSize,
- Stats: &v.bucket.stats,
- }
- for trash := trashL.First(); trash != nil; trash = trashL.Next() {
- todo <- trash
- }
- close(todo)
- wg.Wait()
-
- if err := trashL.Error(); err != nil {
- v.logger.WithError(err).Error("EmptyTrash: lister failed")
- }
- v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
-}
-
-// fixRace(X) is called when "recent/X" exists but "X" doesn't
-// exist. If the timestamps on "recent/X" and "trash/X" indicate there
-// was a race between Put and Trash, fixRace recovers from the race by
-// Untrashing the block.
-func (v *S3AWSVolume) fixRace(key string) bool {
- trash, err := v.head("trash/" + key)
- if err != nil {
- if !os.IsNotExist(v.translateError(err)) {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
- }
- return false
- }
-
- recent, err := v.head("recent/" + key)
- if err != nil {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
- return false
- }
-
- recentTime := *recent.LastModified
- trashTime := *trash.LastModified
- ageWhenTrashed := trashTime.Sub(recentTime)
- if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
- // No evidence of a race: block hasn't been written
- // since it became eligible for Trash. No fix needed.
- return false
- }
-
- v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
- v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
- err = v.safeCopy(key, "trash/"+key)
- if err != nil {
- v.logger.WithError(err).Error("fixRace: copy failed")
- return false
- }
- return true
-}
-
-func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error) {
- input := &s3.HeadObjectInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- }
-
- req := v.bucket.svc.HeadObjectRequest(input)
- res, err := req.Send(context.TODO())
-
- v.bucket.stats.TickOps("head")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
- v.bucket.stats.TickErr(err)
-
- if err != nil {
- return nil, v.translateError(err)
- }
- result = res.HeadObjectOutput
- return
-}
-
-// Get a block: copy the block data into buf, and return the number of
-// bytes copied.
-func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- // Do not use getWithPipe here: the BlockReader interface does not pass
- // through 'buf []byte', and we don't want to allocate two buffers for each
- // read request. Instead, use a version of ReadBlock that accepts 'buf []byte'
- // as an input.
- key := v.key(loc)
- count, err := v.readWorker(ctx, key, buf)
- if err == nil {
- return count, err
- }
-
- err = v.translateError(err)
- if !os.IsNotExist(err) {
- return 0, err
- }
-
- _, err = v.head("recent/" + key)
- err = v.translateError(err)
- if err != nil {
- // If we can't read recent/X, there's no point in
- // trying fixRace. Give up.
- return 0, err
- }
- if !v.fixRace(key) {
- err = os.ErrNotExist
- return 0, err
- }
-
- count, err = v.readWorker(ctx, key, buf)
- if err != nil {
- v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
- err = v.translateError(err)
- return 0, err
- }
- return count, err
-}
-
-func (v *S3AWSVolume) readWorker(ctx context.Context, key string, buf []byte) (int, error) {
- awsBuf := aws.NewWriteAtBuffer(buf)
- downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
- u.PartSize = PartSize
- u.Concurrency = ReadConcurrency
- })
-
- v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)
-
- count, err := downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- })
- v.bucket.stats.TickOps("get")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
- v.bucket.stats.TickErr(err)
- v.bucket.stats.TickInBytes(uint64(count))
- return int(count), v.translateError(err)
-}
-
-func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader) error {
- if r == nil {
- // r == nil leads to a memory violation in func readFillBuf in
- // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
- r = bytes.NewReader(nil)
- }
-
- uploadInput := s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- Body: r,
- }
-
- if loc, ok := v.isKeepBlock(key); ok {
- var contentMD5 string
- md5, err := hex.DecodeString(loc)
- if err != nil {
- return v.translateError(err)
- }
- contentMD5 = base64.StdEncoding.EncodeToString(md5)
- uploadInput.ContentMD5 = &contentMD5
- }
-
- // Experimentation indicated that using concurrency 5 yields the best
- // throughput, better than higher concurrency (10 or 13) by ~5%.
- // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
- // is detrimental to througput (minus ~15%).
- uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
- u.PartSize = PartSize
- u.Concurrency = WriteConcurrency
- })
-
- // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
- // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
- // block, so there is no extra memory use to be concerned about. See
- // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
- // calculating the Sha-256 because we don't need it; we already use md5sum
- // hashes that match the name of the block.
- _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
- r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
- }))
-
- v.bucket.stats.TickOps("put")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
- v.bucket.stats.TickErr(err)
-
- return v.translateError(err)
-}
-
-// Put writes a block.
-func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
- // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
- // sdk to avoid memory allocation there. See #17339 for more information.
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
-
- rdr := bytes.NewReader(block)
- r := NewCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
- key := v.key(loc)
- err := v.writeObject(ctx, key, r)
- if err != nil {
- return err
- }
- return v.writeObject(ctx, "recent/"+key, nil)
-}
-
-type s3awsLister struct {
- Logger logrus.FieldLogger
- Bucket *s3AWSbucket
- Prefix string
- PageSize int
- Stats *s3awsbucketStats
- ContinuationToken string
- buf []s3.Object
- err error
-}
-
-// First fetches the first page and returns the first item. It returns
-// nil if the response is the empty set or an error occurs.
-func (lister *s3awsLister) First() *s3.Object {
- lister.getPage()
- return lister.pop()
-}
-
-// Next returns the next item, fetching the next page if necessary. It
-// returns nil if the last available item has already been fetched, or
-// an error occurs.
-func (lister *s3awsLister) Next() *s3.Object {
- if len(lister.buf) == 0 && lister.ContinuationToken != "" {
- lister.getPage()
- }
- return lister.pop()
-}
-
-// Return the most recent error encountered by First or Next.
-func (lister *s3awsLister) Error() error {
- return lister.err
-}
-
-func (lister *s3awsLister) getPage() {
- lister.Stats.TickOps("list")
- lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
-
- var input *s3.ListObjectsV2Input
- if lister.ContinuationToken == "" {
- input = &s3.ListObjectsV2Input{
- Bucket: aws.String(lister.Bucket.bucket),
- MaxKeys: aws.Int64(int64(lister.PageSize)),
- Prefix: aws.String(lister.Prefix),
- }
- } else {
- input = &s3.ListObjectsV2Input{
- Bucket: aws.String(lister.Bucket.bucket),
- MaxKeys: aws.Int64(int64(lister.PageSize)),
- Prefix: aws.String(lister.Prefix),
- ContinuationToken: &lister.ContinuationToken,
- }
- }
-
- req := lister.Bucket.svc.ListObjectsV2Request(input)
- resp, err := req.Send(context.Background())
- if err != nil {
- if aerr, ok := err.(awserr.Error); ok {
- lister.err = aerr
- } else {
- lister.err = err
- }
- return
- }
-
- if *resp.IsTruncated {
- lister.ContinuationToken = *resp.NextContinuationToken
- } else {
- lister.ContinuationToken = ""
- }
- lister.buf = make([]s3.Object, 0, len(resp.Contents))
- for _, key := range resp.Contents {
- if !strings.HasPrefix(*key.Key, lister.Prefix) {
- lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
- continue
- }
- lister.buf = append(lister.buf, key)
- }
-}
-
-func (lister *s3awsLister) pop() (k *s3.Object) {
- if len(lister.buf) > 0 {
- k = &lister.buf[0]
- lister.buf = lister.buf[1:]
- }
- return
-}
-
-// IndexTo writes a complete list of locators with the given prefix
-// for which Get() can retrieve data.
-func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
- prefix = v.key(prefix)
- // Use a merge sort to find matching sets of X and recent/X.
- dataL := s3awsLister{
- Logger: v.logger,
- Bucket: v.bucket,
- Prefix: prefix,
- PageSize: v.IndexPageSize,
- Stats: &v.bucket.stats,
- }
- recentL := s3awsLister{
- Logger: v.logger,
- Bucket: v.bucket,
- Prefix: "recent/" + prefix,
- PageSize: v.IndexPageSize,
- Stats: &v.bucket.stats,
- }
- for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
- if *data.Key >= "g" {
- // Conveniently, "recent/*" and "trash/*" are
- // lexically greater than all hex-encoded data
- // hashes, so stopping here avoids iterating
- // over all of them needlessly with dataL.
- break
- }
- loc, isblk := v.isKeepBlock(*data.Key)
- if !isblk {
- continue
- }
-
- // stamp is the list entry we should use to report the
- // last-modified time for this data block: it will be
- // the recent/X entry if one exists, otherwise the
- // entry for the data block itself.
- stamp := data
-
- // Advance to the corresponding recent/X marker, if any
- for recent != nil && recentL.Error() == nil {
- if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
- recent = recentL.Next()
- continue
- } else if cmp == 0 {
- stamp = recent
- recent = recentL.Next()
- break
- } else {
- // recent/X marker is missing: we'll
- // use the timestamp on the data
- // object.
- break
- }
- }
- if err := recentL.Error(); err != nil {
- return err
- }
- // We truncate sub-second precision here. Otherwise
- // timestamps will never match the RFC1123-formatted
- // Last-Modified values parsed by Mtime().
- fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
- }
- return dataL.Error()
-}
-
-// Mtime returns the stored timestamp for the given locator.
-func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
- key := v.key(loc)
- _, err := v.head(key)
- if err != nil {
- return s3AWSZeroTime, v.translateError(err)
- }
- resp, err := v.head("recent/" + key)
- err = v.translateError(err)
- if os.IsNotExist(err) {
- // The data object X exists, but recent/X is missing.
- err = v.writeObject(context.Background(), "recent/"+key, nil)
- if err != nil {
- v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
- return s3AWSZeroTime, v.translateError(err)
- }
- v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
- resp, err = v.head("recent/" + key)
- if err != nil {
- v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
- return s3AWSZeroTime, v.translateError(err)
- }
- } else if err != nil {
- // HEAD recent/X failed for some other reason.
- return s3AWSZeroTime, err
- }
- return *resp.LastModified, err
-}
-
-// Status returns a *VolumeStatus representing the current in-use
-// storage capacity and a fake available capacity that doesn't make
-// the volume seem full or nearly-full.
-func (v *S3AWSVolume) Status() *VolumeStatus {
- return &VolumeStatus{
- DeviceNum: 1,
- BytesFree: BlockSize * 1000,
- BytesUsed: 1,
- }
-}
-
-// InternalStats returns bucket I/O and API call counters.
-func (v *S3AWSVolume) InternalStats() interface{} {
- return &v.bucket.stats
-}
-
-// Touch sets the timestamp for the given locator to the current time.
-func (v *S3AWSVolume) Touch(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- key := v.key(loc)
- _, err := v.head(key)
- err = v.translateError(err)
- if os.IsNotExist(err) && v.fixRace(key) {
- // The data object got trashed in a race, but fixRace
- // rescued it.
- } else if err != nil {
- return err
- }
- err = v.writeObject(context.Background(), "recent/"+key, nil)
- return v.translateError(err)
-}
-
-// checkRaceWindow returns a non-nil error if trash/key is, or might
-// be, in the race window (i.e., it's not safe to trash key).
-func (v *S3AWSVolume) checkRaceWindow(key string) error {
- resp, err := v.head("trash/" + key)
- err = v.translateError(err)
- if os.IsNotExist(err) {
- // OK, trash/X doesn't exist so we're not in the race
- // window
- return nil
- } else if err != nil {
- // Error looking up trash/X. We don't know whether
- // we're in the race window
- return err
- }
- t := resp.LastModified
- safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
- if safeWindow <= 0 {
- // We can't count on "touch trash/X" to prolong
- // trash/X's lifetime. The new timestamp might not
- // become visible until now+raceWindow, and EmptyTrash
- // is allowed to delete trash/X before then.
- return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
- }
- // trash/X exists, but it won't be eligible for deletion until
- // after now+raceWindow, so it's safe to overwrite it.
- return nil
-}
-
-func (b *s3AWSbucket) Del(path string) error {
- input := &s3.DeleteObjectInput{
- Bucket: aws.String(b.bucket),
- Key: aws.String(path),
- }
- req := b.svc.DeleteObjectRequest(input)
- _, err := req.Send(context.Background())
- b.stats.TickOps("delete")
- b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
- b.stats.TickErr(err)
- return err
-}
-
-// Trash a Keep block.
-func (v *S3AWSVolume) Trash(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- if t, err := v.Mtime(loc); err != nil {
- return err
- } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
- return nil
- }
- key := v.key(loc)
- if v.cluster.Collections.BlobTrashLifetime == 0 {
- if !v.UnsafeDelete {
- return ErrS3TrashDisabled
- }
- return v.translateError(v.bucket.Del(key))
- }
- err := v.checkRaceWindow(key)
- if err != nil {
- return err
- }
- err = v.safeCopy("trash/"+key, key)
- if err != nil {
- return err
- }
- return v.translateError(v.bucket.Del(key))
-}
-
-// Untrash moves block from trash back into store
-func (v *S3AWSVolume) Untrash(loc string) error {
- key := v.key(loc)
- err := v.safeCopy(key, "trash/"+key)
- if err != nil {
- return err
- }
- err = v.writeObject(context.Background(), "recent/"+key, nil)
- return v.translateError(err)
-}
-
-type s3awsbucketStats struct {
- statsTicker
- Ops uint64
- GetOps uint64
- PutOps uint64
- HeadOps uint64
- DelOps uint64
- ListOps uint64
-}
-
-func (s *s3awsbucketStats) TickErr(err error) {
- if err == nil {
- return
- }
- errType := fmt.Sprintf("%T", err)
- if aerr, ok := err.(awserr.Error); ok {
- if reqErr, ok := err.(awserr.RequestFailure); ok {
- // A service error occurred
- errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
- } else {
- errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
- }
- }
- s.statsTicker.TickErr(err, errType)
-}
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
deleted file mode 100644
index c7e2d485df..0000000000
--- a/services/keepstore/s3aws_volume_test.go
+++ /dev/null
@@ -1,675 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "bytes"
- "context"
- "crypto/md5"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/http/httptest"
- "os"
- "strings"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
-
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/service/s3"
- "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
-
- "github.com/johannesboyne/gofakes3"
- "github.com/johannesboyne/gofakes3/backend/s3mem"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
- check "gopkg.in/check.v1"
-)
-
-const (
- S3AWSTestBucketName = "testbucket"
-)
-
-type s3AWSFakeClock struct {
- now *time.Time
-}
-
-func (c *s3AWSFakeClock) Now() time.Time {
- if c.now == nil {
- return time.Now().UTC()
- }
- return c.now.UTC()
-}
-
-func (c *s3AWSFakeClock) Since(t time.Time) time.Duration {
- return c.Now().Sub(t)
-}
-
-var _ = check.Suite(&StubbedS3AWSSuite{})
-
-var srv httptest.Server
-
-type StubbedS3AWSSuite struct {
- s3server *httptest.Server
- metadata *httptest.Server
- cluster *arvados.Cluster
- handler *handler
- volumes []*TestableS3AWSVolume
-}
-
-func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
- s.s3server = nil
- s.metadata = nil
- s.cluster = testCluster(c)
- s.cluster.Volumes = map[string]arvados.Volume{
- "zzzzz-nyw5e-000000000000000": {Driver: "S3"},
- "zzzzz-nyw5e-111111111111111": {Driver: "S3"},
- }
- s.handler = &handler{}
-}
-
-func (s *StubbedS3AWSSuite) TestGeneric(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- // Use a negative raceWindow so s3test's 1-second
- // timestamp precision doesn't confuse fixRace.
- return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
- })
-}
-
-func (s *StubbedS3AWSSuite) TestGenericReadOnly(c *check.C) {
- DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
- })
-}
-
-func (s *StubbedS3AWSSuite) TestGenericWithPrefix(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- v := s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
- v.PrefixLength = 3
- return v
- })
-}
-
-func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 0)
- v.IndexPageSize = 3
- for i := 0; i < 256; i++ {
- v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
- }
- for _, spec := range []struct {
- prefix string
- expectMatch int
- }{
- {"", 256},
- {"c", 16},
- {"bc", 1},
- {"abc", 0},
- } {
- buf := new(bytes.Buffer)
- err := v.IndexTo(spec.prefix, buf)
- c.Check(err, check.IsNil)
-
- idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
- c.Check(len(idx), check.Equals, spec.expectMatch+1)
- c.Check(len(idx[len(idx)-1]), check.Equals, 0)
- }
-}
-
-func (s *StubbedS3AWSSuite) TestSignature(c *check.C) {
- var header http.Header
- stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- header = r.Header
- }))
- defer stub.Close()
-
- // The aws-sdk-go-v2 driver only supports S3 V4 signatures. S3 v2 signatures are being phased out
- // as of June 24, 2020. Cf. https://forums.aws.amazon.com/ann.jspa?annID=5816
- vol := S3AWSVolume{
- S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
- AccessKeyID: "xxx",
- SecretAccessKey: "xxx",
- Endpoint: stub.URL,
- Region: "test-region-1",
- Bucket: "test-bucket-name",
- },
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
- }
- err := vol.check("")
- // Our test S3 server uses the older 'Path Style'
- vol.bucket.svc.ForcePathStyle = true
-
- c.Check(err, check.IsNil)
- err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
- c.Check(err, check.IsNil)
- c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
-}
-
-func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
- s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
- exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
- // Literal example from
- // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
- // but with updated timestamps
- io.WriteString(w, `{"Code":"Success","LastUpdated":"`+upd+`","Type":"AWS-HMAC","AccessKeyId":"ASIAIOSFODNN7EXAMPLE","SecretAccessKey":"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY","Token":"token","Expiration":"`+exp+`"}`)
- }))
- defer s.metadata.Close()
-
- v := &S3AWSVolume{
- S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
- IAMRole: s.metadata.URL + "/latest/api/token",
- Endpoint: "http://localhost:12345",
- Region: "test-region-1",
- Bucket: "test-bucket-name",
- },
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
- }
- err := v.check(s.metadata.URL + "/latest")
- c.Check(err, check.IsNil)
- creds, err := v.bucket.svc.Client.Config.Credentials.Retrieve(context.Background())
- c.Check(err, check.IsNil)
- c.Check(creds.AccessKeyID, check.Equals, "ASIAIOSFODNN7EXAMPLE")
- c.Check(creds.SecretAccessKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
-
- s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusNotFound)
- }))
- deadv := &S3AWSVolume{
- S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
- IAMRole: s.metadata.URL + "/fake-metadata/test-role",
- Endpoint: "http://localhost:12345",
- Region: "test-region-1",
- Bucket: "test-bucket-name",
- },
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
- }
- err = deadv.check(s.metadata.URL + "/latest")
- c.Check(err, check.IsNil)
- _, err = deadv.bucket.svc.Client.Config.Credentials.Retrieve(context.Background())
- c.Check(err, check.ErrorMatches, `(?s).*EC2RoleRequestError: no EC2 instance role found.*`)
- c.Check(err, check.ErrorMatches, `(?s).*404.*`)
-}
-
-func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
- stats := func() string {
- buf, err := json.Marshal(v.InternalStats())
- c.Check(err, check.IsNil)
- return string(buf)
- }
-
- c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
-
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- _, err := v.Get(context.Background(), loc, make([]byte, 3))
- c.Check(err, check.NotNil)
- c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
- c.Check(stats(), check.Matches, `.*"s3.requestFailure 404 NoSuchKey[^"]*":[^0].*`)
- c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
-
- err = v.Put(context.Background(), loc, []byte("foo"))
- c.Check(err, check.IsNil)
- c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
- c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
-
- _, err = v.Get(context.Background(), loc, make([]byte, 3))
- c.Check(err, check.IsNil)
- _, err = v.Get(context.Background(), loc, make([]byte, 3))
- c.Check(err, check.IsNil)
- c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
-}
-
-type s3AWSBlockingHandler struct {
- requested chan *http.Request
- unblock chan struct{}
-}
-
-func (h *s3AWSBlockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method == "PUT" && !strings.Contains(strings.Trim(r.URL.Path, "/"), "/") {
- // Accept PutBucket ("PUT /bucketname/"), called by
- // newTestableVolume
- return
- }
- if h.requested != nil {
- h.requested <- r
- }
- if h.unblock != nil {
- <-h.unblock
- }
- http.Error(w, "nothing here", http.StatusNotFound)
-}
-
-func (s *StubbedS3AWSSuite) TestGetContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := make([]byte, 3)
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
- _, err := v.Get(ctx, loc, buf)
- return err
- })
-}
-
-func (s *StubbedS3AWSSuite) TestCompareContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := []byte("bar")
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
- return v.Compare(ctx, loc, buf)
- })
-}
-
-func (s *StubbedS3AWSSuite) TestPutContextCancel(c *check.C) {
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- buf := []byte("foo")
-
- s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
- return v.Put(ctx, loc, buf)
- })
-}
-
-func (s *StubbedS3AWSSuite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3AWSVolume) error) {
- handler := &s3AWSBlockingHandler{}
- s.s3server = httptest.NewServer(handler)
- defer s.s3server.Close()
-
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
-
- ctx, cancel := context.WithCancel(context.Background())
-
- handler.requested = make(chan *http.Request)
- handler.unblock = make(chan struct{})
- defer close(handler.unblock)
-
- doneFunc := make(chan struct{})
- go func() {
- err := testFunc(ctx, v)
- c.Check(err, check.Equals, context.Canceled)
- close(doneFunc)
- }()
-
- timeout := time.After(10 * time.Second)
-
- // Wait for the stub server to receive a request, meaning
- // Get() is waiting for an s3 operation.
- select {
- case <-timeout:
- c.Fatal("timed out waiting for test func to call our handler")
- case <-doneFunc:
- c.Fatal("test func finished without even calling our handler!")
- case <-handler.requested:
- }
-
- cancel()
-
- select {
- case <-timeout:
- c.Fatal("timed out")
- case <-doneFunc:
- }
-}
-
-func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
- s.cluster.Collections.BlobTrashLifetime.Set("1h")
- s.cluster.Collections.BlobSigningTTL.Set("1h")
-
- v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
- var none time.Time
-
- putS3Obj := func(t time.Time, key string, data []byte) {
- if t == none {
- return
- }
- v.serverClock.now = &t
- uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
- _, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- Body: bytes.NewReader(data),
- })
- if err != nil {
- panic(err)
- }
- v.serverClock.now = nil
- _, err = v.head(key)
- if err != nil {
- panic(err)
- }
- }
-
- t0 := time.Now()
- nextKey := 0
- for _, scenario := range []struct {
- label string
- dataT time.Time
- recentT time.Time
- trashT time.Time
- canGet bool
- canTrash bool
- canGetAfterTrash bool
- canUntrash bool
- haveTrashAfterEmpty bool
- freshAfterEmpty bool
- }{
- {
- "No related objects",
- none, none, none,
- false, false, false, false, false, false,
- },
- {
- // Stored by older version, or there was a
- // race between EmptyTrash and Put: Trash is a
- // no-op even though the data object is very
- // old
- "No recent/X",
- t0.Add(-48 * time.Hour), none, none,
- true, true, true, false, false, false,
- },
- {
- "Not trash, but old enough to be eligible for trash",
- t0.Add(-24 * time.Hour), t0.Add(-2 * time.Hour), none,
- true, true, false, false, false, false,
- },
- {
- "Not trash, and not old enough to be eligible for trash",
- t0.Add(-24 * time.Hour), t0.Add(-30 * time.Minute), none,
- true, true, true, false, false, false,
- },
- {
- "Trashed + untrashed copies exist, due to recent race between Trash and Put",
- t0.Add(-24 * time.Hour), t0.Add(-3 * time.Minute), t0.Add(-2 * time.Minute),
- true, true, true, true, true, false,
- },
- {
- "Trashed + untrashed copies exist, trash nearly eligible for deletion: prone to Trash race",
- t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
- true, false, true, true, true, false,
- },
- {
- "Trashed + untrashed copies exist, trash is eligible for deletion: prone to Trash race",
- t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-61 * time.Minute),
- true, false, true, true, false, false,
- },
- {
- "Trashed + untrashed copies exist, due to old race between Put and unfinished Trash: emptying trash is unsafe",
- t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-12 * time.Hour),
- true, false, true, true, true, true,
- },
- {
- "Trashed + untrashed copies exist, used to be unsafe to empty, but since made safe by fixRace+Touch",
- t0.Add(-time.Second), t0.Add(-time.Second), t0.Add(-12 * time.Hour),
- true, true, true, true, false, false,
- },
- {
- "Trashed + untrashed copies exist because Trash operation was interrupted (no race)",
- t0.Add(-24 * time.Hour), t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour),
- true, false, true, true, false, false,
- },
- {
- "Trash, not yet eligible for deletion",
- none, t0.Add(-12 * time.Hour), t0.Add(-time.Minute),
- false, false, false, true, true, false,
- },
- {
- "Trash, not yet eligible for deletion, prone to races",
- none, t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
- false, false, false, true, true, false,
- },
- {
- "Trash, eligible for deletion",
- none, t0.Add(-12 * time.Hour), t0.Add(-2 * time.Hour),
- false, false, false, true, false, false,
- },
- {
- "Erroneously trashed during a race, detected before BlobTrashLifetime",
- none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
- true, false, true, true, true, false,
- },
- {
- "Erroneously trashed during a race, rescue during EmptyTrash despite reaching BlobTrashLifetime",
- none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
- true, false, true, true, true, false,
- },
- {
- "Trashed copy exists with no recent/* marker (cause unknown); repair by untrashing",
- none, none, t0.Add(-time.Minute),
- false, false, false, true, true, true,
- },
- } {
- for _, prefixLength := range []int{0, 3} {
- v.PrefixLength = prefixLength
- c.Logf("Scenario: %q (prefixLength=%d)", scenario.label, prefixLength)
-
- // We have a few tests to run for each scenario, and
- // the tests are expected to change state. By calling
- // this setup func between tests, we (re)create the
- // scenario as specified, using a new unique block
- // locator to prevent interference from previous
- // tests.
-
- setupScenario := func() (string, []byte) {
- nextKey++
- blk := []byte(fmt.Sprintf("%d", nextKey))
- loc := fmt.Sprintf("%x", md5.Sum(blk))
- key := loc
- if prefixLength > 0 {
- key = loc[:prefixLength] + "/" + loc
- }
- c.Log("\t", loc, "\t", key)
- putS3Obj(scenario.dataT, key, blk)
- putS3Obj(scenario.recentT, "recent/"+key, nil)
- putS3Obj(scenario.trashT, "trash/"+key, blk)
- v.serverClock.now = &t0
- return loc, blk
- }
-
- // Check canGet
- loc, blk := setupScenario()
- buf := make([]byte, len(blk))
- _, err := v.Get(context.Background(), loc, buf)
- c.Check(err == nil, check.Equals, scenario.canGet)
- if err != nil {
- c.Check(os.IsNotExist(err), check.Equals, true)
- }
-
- // Call Trash, then check canTrash and canGetAfterTrash
- loc, _ = setupScenario()
- err = v.Trash(loc)
- c.Check(err == nil, check.Equals, scenario.canTrash)
- _, err = v.Get(context.Background(), loc, buf)
- c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
- if err != nil {
- c.Check(os.IsNotExist(err), check.Equals, true)
- }
-
- // Call Untrash, then check canUntrash
- loc, _ = setupScenario()
- err = v.Untrash(loc)
- c.Check(err == nil, check.Equals, scenario.canUntrash)
- if scenario.dataT != none || scenario.trashT != none {
- // In all scenarios where the data exists, we
- // should be able to Get after Untrash --
- // regardless of timestamps, errors, race
- // conditions, etc.
- _, err = v.Get(context.Background(), loc, buf)
- c.Check(err, check.IsNil)
- }
-
- // Call EmptyTrash, then check haveTrashAfterEmpty and
- // freshAfterEmpty
- loc, _ = setupScenario()
- v.EmptyTrash()
- _, err = v.head("trash/" + v.key(loc))
- c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
- if scenario.freshAfterEmpty {
- t, err := v.Mtime(loc)
- c.Check(err, check.IsNil)
- // new mtime must be current (with an
- // allowance for 1s timestamp precision)
- c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
- }
-
- // Check for current Mtime after Put (applies to all
- // scenarios)
- loc, blk = setupScenario()
- err = v.Put(context.Background(), loc, blk)
- c.Check(err, check.IsNil)
- t, err := v.Mtime(loc)
- c.Check(err, check.IsNil)
- c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
- }
- }
-}
-
-type TestableS3AWSVolume struct {
- *S3AWSVolume
- server *httptest.Server
- c *check.C
- serverClock *s3AWSFakeClock
-}
-
-type LogrusLog struct {
- log *logrus.FieldLogger
-}
-
-func (l LogrusLog) Print(level gofakes3.LogLevel, v ...interface{}) {
- switch level {
- case gofakes3.LogErr:
- (*l.log).Errorln(v...)
- case gofakes3.LogWarn:
- (*l.log).Warnln(v...)
- case gofakes3.LogInfo:
- (*l.log).Infoln(v...)
- default:
- panic("unknown level")
- }
-}
-
-func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, raceWindow time.Duration) *TestableS3AWSVolume {
-
- clock := &s3AWSFakeClock{}
- // fake s3
- backend := s3mem.New(s3mem.WithTimeSource(clock))
-
- // To enable GoFakeS3 debug logging, pass logger to gofakes3.WithLogger()
- /* logger := new(LogrusLog)
- ctxLogger := ctxlog.FromContext(context.Background())
- logger.log = &ctxLogger */
- faker := gofakes3.New(backend, gofakes3.WithTimeSource(clock), gofakes3.WithLogger(nil), gofakes3.WithTimeSkewLimit(0))
- srv := httptest.NewServer(faker.Server())
-
- endpoint := srv.URL
- if s.s3server != nil {
- endpoint = s.s3server.URL
- }
-
- iamRole, accessKey, secretKey := "", "xxx", "xxx"
- if s.metadata != nil {
- iamRole, accessKey, secretKey = s.metadata.URL+"/fake-metadata/test-role", "", ""
- }
-
- v := &TestableS3AWSVolume{
- S3AWSVolume: &S3AWSVolume{
- S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
- IAMRole: iamRole,
- AccessKeyID: accessKey,
- SecretAccessKey: secretKey,
- Bucket: S3AWSTestBucketName,
- Endpoint: endpoint,
- Region: "test-region-1",
- LocationConstraint: true,
- UnsafeDelete: true,
- IndexPageSize: 1000,
- },
- cluster: cluster,
- volume: volume,
- logger: ctxlog.TestLogger(c),
- metrics: metrics,
- },
- c: c,
- server: srv,
- serverClock: clock,
- }
- c.Assert(v.S3AWSVolume.check(""), check.IsNil)
- // Our test S3 server uses the older 'Path Style'
- v.S3AWSVolume.bucket.svc.ForcePathStyle = true
- // Create the testbucket
- input := &s3.CreateBucketInput{
- Bucket: aws.String(S3AWSTestBucketName),
- }
- req := v.S3AWSVolume.bucket.svc.CreateBucketRequest(input)
- _, err := req.Send(context.Background())
- c.Assert(err, check.IsNil)
- // We couldn't set RaceWindow until now because check()
- // rejects negative values.
- v.S3AWSVolume.RaceWindow = arvados.Duration(raceWindow)
- return v
-}
-
-// PutRaw skips the ContentMD5 test
-func (v *TestableS3AWSVolume) PutRaw(loc string, block []byte) {
- key := v.key(loc)
- r := NewCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
-
- uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
- u.PartSize = 5 * 1024 * 1024
- u.Concurrency = 13
- })
-
- _, err := uploader.Upload(&s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(key),
- Body: r,
- })
- if err != nil {
- v.logger.Printf("PutRaw: %s: %+v", key, err)
- }
-
- empty := bytes.NewReader([]byte{})
- _, err = uploader.Upload(&s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String("recent/" + key),
- Body: empty,
- })
- if err != nil {
- v.logger.Printf("PutRaw: recent/%s: %+v", key, err)
- }
-}
-
-// TouchWithDate turns back the clock while doing a Touch(). We assume
-// there are no other operations happening on the same s3test server
-// while we do this.
-func (v *TestableS3AWSVolume) TouchWithDate(loc string, lastPut time.Time) {
- v.serverClock.now = &lastPut
-
- uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
- empty := bytes.NewReader([]byte{})
- _, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String("recent/" + v.key(loc)),
- Body: empty,
- })
- if err != nil {
- panic(err)
- }
-
- v.serverClock.now = nil
-}
-
-func (v *TestableS3AWSVolume) Teardown() {
- v.server.Close()
-}
-
-func (v *TestableS3AWSVolume) ReadWriteOperationLabelValues() (r, w string) {
- return "get", "put"
-}
diff --git a/services/keepstore/status_test.go b/services/keepstore/status_test.go
deleted file mode 100644
index 80f98adb22..0000000000
--- a/services/keepstore/status_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "encoding/json"
-)
-
-// We don't have isolated unit tests for /status.json yet, but we do
-// check (e.g., in pull_worker_test.go) that /status.json reports
-// specific statistics correctly at the appropriate times.
-
-// getStatusItem("foo","bar","baz") retrieves /status.json, decodes
-// the response body into resp, and returns resp["foo"]["bar"]["baz"].
-func getStatusItem(h *handler, keys ...string) interface{} {
- resp := IssueRequest(h, &RequestTester{"/status.json", "", "GET", nil, ""})
- var s interface{}
- json.NewDecoder(resp.Body).Decode(&s)
- for _, k := range keys {
- s = s.(map[string]interface{})[k]
- }
- return s
-}
diff --git a/services/keepstore/streamwriterat.go b/services/keepstore/streamwriterat.go
new file mode 100644
index 0000000000..02dce6e216
--- /dev/null
+++ b/services/keepstore/streamwriterat.go
@@ -0,0 +1,160 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+)
+
+// streamWriterAt translates random-access writes to sequential
+// writes. The caller is expected to use an arbitrary sequence of
+// non-overlapping WriteAt calls covering all positions between 0 and
+// N, for any N < len(buf), then call Close.
+//
+// streamWriterAt writes the data to the provided io.Writer in
+// sequential order.
+//
+// streamWriterAt can also be wrapped with an io.OffsetWriter to
+// provide an asynchronous buffer: the caller can use the io.Writer
+// interface to write into a memory buffer and return without waiting
+// for the wrapped writer to catch up.
+//
+// Close returns when all data has been written through.
+type streamWriterAt struct {
+ writer io.Writer
+ buf []byte
+ writepos int // target offset if Write is called
+ partsize int // size of each part written through to writer
+ endpos int // portion of buf actually used, judging by WriteAt calls so far
+ partfilled []int // number of bytes written to each part so far
+ partready chan []byte // parts of buf fully written / waiting for writer goroutine
+ partnext int // index of next part we will send to partready when it's ready
+ wroteAt int // bytes we copied to buf in WriteAt
+ wrote int // bytes successfully written through to writer
+ errWrite chan error // final outcome of writer goroutine
+ closed bool // streamWriterAt has been closed
+ mtx sync.Mutex // guard internal fields during concurrent calls to WriteAt and Close
+}
+
+// newStreamWriterAt creates a new streamWriterAt.
+func newStreamWriterAt(w io.Writer, partsize int, buf []byte) *streamWriterAt {
+ if partsize == 0 {
+ partsize = 65536
+ }
+ nparts := (len(buf) + partsize - 1) / partsize
+ swa := &streamWriterAt{
+ writer: w,
+ partsize: partsize,
+ buf: buf,
+ partfilled: make([]int, nparts),
+ partready: make(chan []byte, nparts),
+ errWrite: make(chan error, 1),
+ }
+ go swa.writeToWriter()
+ return swa
+}
+
+// Wrote returns the number of bytes written through to the
+// io.Writer.
+//
+// Wrote must not be called until after Close.
+func (swa *streamWriterAt) Wrote() int {
+ return swa.wrote
+}
+
+// WroteAt returns the number of bytes passed to WriteAt, regardless of
+// whether they were written through to the io.Writer.
+func (swa *streamWriterAt) WroteAt() int {
+ swa.mtx.Lock()
+ defer swa.mtx.Unlock()
+ return swa.wroteAt
+}
+
+func (swa *streamWriterAt) writeToWriter() {
+ defer close(swa.errWrite)
+ for p := range swa.partready {
+ n, err := swa.writer.Write(p)
+ if err != nil {
+ swa.errWrite <- err
+ return
+ }
+ swa.wrote += n
+ }
+}
+
+// WriteAt implements io.WriterAt. WriteAt is goroutine-safe.
+func (swa *streamWriterAt) WriteAt(p []byte, offset int64) (int, error) {
+ pos := int(offset)
+ n := 0
+ if pos <= len(swa.buf) {
+ n = copy(swa.buf[pos:], p)
+ }
+ if n < len(p) {
+ return n, fmt.Errorf("write beyond end of buffer: offset %d len %d buf %d", offset, len(p), len(swa.buf))
+ }
+ endpos := pos + n
+
+ swa.mtx.Lock()
+ defer swa.mtx.Unlock()
+ swa.wroteAt += len(p)
+ if swa.endpos < endpos {
+ swa.endpos = endpos
+ }
+ if swa.closed {
+ return 0, errors.New("invalid use of closed streamWriterAt")
+ }
+ // Track the number of bytes that landed in each of our
+ // (output) parts.
+ for i := pos; i < endpos; {
+ j := i + swa.partsize - (i % swa.partsize)
+ if j > endpos {
+ j = endpos
+ }
+ pf := swa.partfilled[i/swa.partsize]
+ pf += j - i
+ if pf > swa.partsize {
+ return 0, errors.New("streamWriterAt: overlapping WriteAt calls")
+ }
+ swa.partfilled[i/swa.partsize] = pf
+ i = j
+ }
+ // Flush filled parts to partready.
+ for swa.partnext < len(swa.partfilled) && swa.partfilled[swa.partnext] == swa.partsize {
+ offset := swa.partnext * swa.partsize
+ swa.partready <- swa.buf[offset : offset+swa.partsize]
+ swa.partnext++
+ }
+ return len(p), nil
+}
+
+// Close flushes all buffered data through to the io.Writer.
+func (swa *streamWriterAt) Close() error {
+ swa.mtx.Lock()
+ defer swa.mtx.Unlock()
+ if swa.closed {
+ return errors.New("invalid use of closed streamWriterAt")
+ }
+ swa.closed = true
+ // Flush last part if needed. If the input doesn't end on a
+ // part boundary, the last part never appears "filled" when we
+ // check in WriteAt. But here, we know endpos is the end of
+ // the stream, so we can check whether the last part is ready.
+ if offset := swa.partnext * swa.partsize; offset < swa.endpos && offset+swa.partfilled[swa.partnext] == swa.endpos {
+ swa.partready <- swa.buf[offset:swa.endpos]
+ swa.partnext++
+ }
+ close(swa.partready)
+ err := <-swa.errWrite
+ if err != nil {
+ return err
+ }
+ if swa.wrote != swa.wroteAt {
+ return fmt.Errorf("streamWriterAt: detected hole in input: wrote %d but flushed %d", swa.wroteAt, swa.wrote)
+ }
+ return nil
+}
diff --git a/services/keepstore/streamwriterat_test.go b/services/keepstore/streamwriterat_test.go
new file mode 100644
index 0000000000..fe6837e522
--- /dev/null
+++ b/services/keepstore/streamwriterat_test.go
@@ -0,0 +1,83 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+ "bytes"
+ "sync"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&streamWriterAtSuite{})
+
+type streamWriterAtSuite struct{}
+
+func (s *streamWriterAtSuite) TestPartSizes(c *C) {
+ for partsize := 1; partsize < 5; partsize++ {
+ for writesize := 1; writesize < 5; writesize++ {
+ for datasize := 1; datasize < 100; datasize += 13 {
+ for bufextra := 0; bufextra < 5; bufextra++ {
+ c.Logf("=== partsize %d writesize %d datasize %d bufextra %d", partsize, writesize, datasize, bufextra)
+ outbuf := bytes.NewBuffer(nil)
+ indata := make([]byte, datasize)
+ for i := range indata {
+ indata[i] = byte(i)
+ }
+ swa := newStreamWriterAt(outbuf, partsize, make([]byte, datasize+bufextra))
+ var wg sync.WaitGroup
+ for pos := 0; pos < datasize; pos += writesize {
+ pos := pos
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ endpos := pos + writesize
+ if endpos > datasize {
+ endpos = datasize
+ }
+ swa.WriteAt(indata[pos:endpos], int64(pos))
+ }()
+ }
+ wg.Wait()
+ swa.Close()
+ c.Check(outbuf.Bytes(), DeepEquals, indata)
+ }
+ }
+ }
+ }
+}
+
+func (s *streamWriterAtSuite) TestOverflow(c *C) {
+ for offset := -1; offset < 2; offset++ {
+ buf := make([]byte, 50)
+ swa := newStreamWriterAt(bytes.NewBuffer(nil), 20, buf)
+ _, err := swa.WriteAt([]byte("foo"), int64(len(buf)+offset))
+ c.Check(err, NotNil)
+ err = swa.Close()
+ c.Check(err, IsNil)
+ }
+}
+
+func (s *streamWriterAtSuite) TestIncompleteWrite(c *C) {
+ for _, partsize := range []int{20, 25} {
+ for _, bufsize := range []int{50, 55, 60} {
+ for offset := 0; offset < 3; offset++ {
+ swa := newStreamWriterAt(bytes.NewBuffer(nil), partsize, make([]byte, bufsize))
+ _, err := swa.WriteAt(make([]byte, 1), 49)
+ c.Check(err, IsNil)
+ _, err = swa.WriteAt(make([]byte, 46), int64(offset))
+ c.Check(err, IsNil)
+ err = swa.Close()
+ c.Check(err, NotNil)
+ c.Check(swa.WroteAt(), Equals, 47)
+ if offset == 0 {
+ c.Check(swa.Wrote(), Equals, 40/partsize*partsize)
+ } else {
+ c.Check(swa.Wrote(), Equals, 0)
+ }
+ }
+ }
+ }
+}
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index 3909d90d92..819c25acc1 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -5,71 +5,183 @@
package keepstore
import (
- "errors"
+ "context"
+ "sync"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/sirupsen/logrus"
+ "github.com/prometheus/client_golang/prometheus"
)
-// RunTrashWorker is used by Keepstore to initiate trash worker channel goroutine.
-// The channel will process trash list.
-// For each (next) trash request:
-// Delete the block indicated by the trash request Locator
-// Repeat
-//
-func RunTrashWorker(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arvados.Cluster, trashq *WorkQueue) {
- for item := range trashq.NextItem {
- trashRequest := item.(TrashRequest)
- TrashItem(volmgr, logger, cluster, trashRequest)
- trashq.DoneItem <- struct{}{}
- }
+type TrashListItem struct {
+ Locator string `json:"locator"`
+ BlockMtime int64 `json:"block_mtime"`
+ MountUUID string `json:"mount_uuid"` // Target mount, or "" for "everywhere"
+}
+
+type trasher struct {
+ keepstore *keepstore
+ todo []TrashListItem
+ cond *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+ inprogress atomic.Int64
}
-// TrashItem deletes the indicated block from every writable volume.
-func TrashItem(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arvados.Cluster, trashRequest TrashRequest) {
- reqMtime := time.Unix(0, trashRequest.BlockMtime)
- if time.Since(reqMtime) < cluster.Collections.BlobSigningTTL.Duration() {
- logger.Warnf("client asked to delete a %v old block %v (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
- arvados.Duration(time.Since(reqMtime)),
- trashRequest.Locator,
- trashRequest.BlockMtime,
- reqMtime,
- cluster.Collections.BlobSigningTTL)
- return
+func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *trasher {
+ t := &trasher{
+ keepstore: keepstore,
+ cond: sync.NewCond(&sync.Mutex{}),
+ }
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "trash_queue_pending_entries",
+ Help: "Number of queued trash requests",
+ },
+ func() float64 {
+ t.cond.L.Lock()
+ defer t.cond.L.Unlock()
+ return float64(len(t.todo))
+ },
+ ))
+ reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "keepstore",
+ Name: "trash_queue_inprogress_entries",
+ Help: "Number of trash requests in progress",
+ },
+ func() float64 {
+ return float64(t.inprogress.Load())
+ },
+ ))
+ if !keepstore.cluster.Collections.BlobTrash {
+ keepstore.logger.Info("not running trash worker because Collections.BlobTrash == false")
+ return t
}
- var volumes []*VolumeMount
- if uuid := trashRequest.MountUUID; uuid == "" {
- volumes = volmgr.AllWritable()
- } else if mnt := volmgr.Lookup(uuid, true); mnt == nil {
- logger.Warnf("trash request for nonexistent mount: %v", trashRequest)
- return
+ var mntsAllowTrash []*mount
+ for _, mnt := range t.keepstore.mounts {
+ if mnt.AllowTrash {
+ mntsAllowTrash = append(mntsAllowTrash, mnt)
+ }
+ }
+ if len(mntsAllowTrash) == 0 {
+ t.keepstore.logger.Info("not running trash worker because there are no writable or trashable volumes")
} else {
- volumes = []*VolumeMount{mnt}
+ for i := 0; i < keepstore.cluster.Collections.BlobTrashConcurrency; i++ {
+ go t.runWorker(ctx, mntsAllowTrash)
+ }
}
+ return t
+}
+
+func (t *trasher) SetTrashList(newlist []TrashListItem) {
+ t.cond.L.Lock()
+ t.todo = newlist
+ t.cond.L.Unlock()
+ t.cond.Broadcast()
+}
- for _, volume := range volumes {
- mtime, err := volume.Mtime(trashRequest.Locator)
- if err != nil {
- logger.WithError(err).Errorf("%v Trash(%v)", volume, trashRequest.Locator)
- continue
+func (t *trasher) runWorker(ctx context.Context, mntsAllowTrash []*mount) {
+ go func() {
+ <-ctx.Done()
+ t.cond.Broadcast()
+ }()
+ for {
+ t.cond.L.Lock()
+ for len(t.todo) == 0 && ctx.Err() == nil {
+ t.cond.Wait()
}
- if trashRequest.BlockMtime != mtime.UnixNano() {
- logger.Infof("%v Trash(%v): stored mtime %v does not match trash list value %v; skipping", volume, trashRequest.Locator, mtime.UnixNano(), trashRequest.BlockMtime)
- continue
+ if ctx.Err() != nil {
+ t.cond.L.Unlock()
+ return
}
+ item := t.todo[0]
+ t.todo = t.todo[1:]
+ t.inprogress.Add(1)
+ t.cond.L.Unlock()
- if !cluster.Collections.BlobTrash {
- err = errors.New("skipping because Collections.BlobTrash is false")
- } else {
- err = volume.Trash(trashRequest.Locator)
- }
+ func() {
+ defer t.inprogress.Add(-1)
+ logger := t.keepstore.logger.WithField("locator", item.Locator)
- if err != nil {
- logger.WithError(err).Errorf("%v Trash(%v)", volume, trashRequest.Locator)
- } else {
- logger.Infof("%v Trash(%v) OK", volume, trashRequest.Locator)
- }
+ li, err := getLocatorInfo(item.Locator)
+ if err != nil {
+ logger.Warn("ignoring trash request for invalid locator")
+ return
+ }
+
+ reqMtime := time.Unix(0, item.BlockMtime)
+ if time.Since(reqMtime) < t.keepstore.cluster.Collections.BlobSigningTTL.Duration() {
+ logger.Warnf("client asked to delete a %v old block (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
+ arvados.Duration(time.Since(reqMtime)),
+ item.BlockMtime,
+ reqMtime,
+ t.keepstore.cluster.Collections.BlobSigningTTL)
+ return
+ }
+
+ var mnts []*mount
+ if item.MountUUID == "" {
+ mnts = mntsAllowTrash
+ } else if mnt := t.keepstore.mounts[item.MountUUID]; mnt == nil {
+ logger.Warnf("ignoring trash request for nonexistent mount %s", item.MountUUID)
+ return
+ } else if !mnt.AllowTrash {
+ logger.Warnf("ignoring trash request for readonly mount %s with AllowTrashWhenReadOnly==false", item.MountUUID)
+ return
+ } else {
+ mnts = []*mount{mnt}
+ }
+
+ for _, mnt := range mnts {
+ logger := logger.WithField("mount", mnt.UUID)
+ mtime, err := mnt.Mtime(li.hash)
+ if err != nil {
+ logger.WithError(err).Error("error getting stored mtime")
+ continue
+ }
+ if !mtime.Equal(reqMtime) {
+ logger.Infof("stored mtime (%v) does not match trash list mtime (%v); skipping", mtime, reqMtime)
+ continue
+ }
+ err = mnt.BlockTrash(li.hash)
+ if err != nil {
+ logger.WithError(err).Info("error trashing block")
+ continue
+ }
+ logger.Info("block trashed")
+ }
+ }()
}
}
+
+type trashEmptier struct{}
+
+func newTrashEmptier(ctx context.Context, ks *keepstore, reg *prometheus.Registry) *trashEmptier {
+ d := ks.cluster.Collections.BlobTrashCheckInterval.Duration()
+ if d <= 0 ||
+ !ks.cluster.Collections.BlobTrash ||
+ ks.cluster.Collections.BlobDeleteConcurrency <= 0 {
+ ks.logger.Infof("not running trash emptier because disabled by config (enabled=%t, interval=%v, concurrency=%d)", ks.cluster.Collections.BlobTrash, d, ks.cluster.Collections.BlobDeleteConcurrency)
+ return &trashEmptier{}
+ }
+ go func() {
+ ticker := time.NewTicker(d)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ }
+ for _, mnt := range ks.mounts {
+ if mnt.KeepMount.AllowTrash {
+ mnt.volume.EmptyTrash()
+ }
+ }
+ }
+ }()
+ return &trashEmptier{}
+}
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index 4e20c3feb4..0c304dbade 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -5,364 +5,198 @@
package keepstore
import (
- "container/list"
"context"
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "sort"
"time"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/prometheus/client_golang/prometheus"
- check "gopkg.in/check.v1"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ . "gopkg.in/check.v1"
)
-type TrashWorkerTestData struct {
- Locator1 string
- Block1 []byte
- BlockMtime1 int64
-
- Locator2 string
- Block2 []byte
- BlockMtime2 int64
-
- CreateData bool
- CreateInVolume1 bool
-
- UseTrashLifeTime bool
- DifferentMtimes bool
-
- DeleteLocator string
- SpecifyMountUUID bool
-
- ExpectLocator1 bool
- ExpectLocator2 bool
-}
-
-/* Delete block that does not exist in any of the keep volumes.
- Expect no errors.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_GetNonExistingLocator(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: "5d41402abc4b2a76b9719d911017c592",
- Block1: []byte("hello"),
-
- Locator2: "5d41402abc4b2a76b9719d911017c592",
- Block2: []byte("hello"),
-
- CreateData: false,
-
- DeleteLocator: "5d41402abc4b2a76b9719d911017c592",
-
- ExpectLocator1: false,
- ExpectLocator2: false,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Delete a block that exists on volume 1 of the keep servers.
- Expect the second locator in volume 2 to be unaffected.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume1(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash2,
- Block2: TestBlock2,
-
- CreateData: true,
-
- DeleteLocator: TestHash, // first locator
-
- ExpectLocator1: false,
- ExpectLocator2: true,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Delete a block that exists on volume 2 of the keep servers.
- Expect the first locator in volume 1 to be unaffected.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume2(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash2,
- Block2: TestBlock2,
-
- CreateData: true,
-
- DeleteLocator: TestHash2, // locator 2
-
- ExpectLocator1: true,
- ExpectLocator2: false,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Delete a block with matching mtime for locator in both volumes.
- Expect locator to be deleted from both volumes.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInBothVolumes(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash,
- Block2: TestBlock,
-
- CreateData: true,
-
- DeleteLocator: TestHash,
-
- ExpectLocator1: false,
- ExpectLocator2: false,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Same locator with different Mtimes exists in both volumes.
- Delete the second and expect the first to be still around.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash,
- Block2: TestBlock,
-
- CreateData: true,
- DifferentMtimes: true,
-
- DeleteLocator: TestHash,
-
- ExpectLocator1: true,
- ExpectLocator2: false,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on both volumes with matching mtimes,
-// but specify a MountUUID in the request so it only gets deleted from
-// the first volume.
-func (s *HandlerSuite) TestTrashWorkerIntegration_SpecifyMountUUID(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash,
- Block2: TestBlock,
-
- CreateData: true,
-
- DeleteLocator: TestHash,
- SpecifyMountUUID: true,
-
- ExpectLocator1: true,
- ExpectLocator2: true,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Two different locators in volume 1.
- Delete one of them.
- Expect the other unaffected.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash2,
- Block2: TestBlock2,
-
- CreateData: true,
- CreateInVolume1: true,
-
- DeleteLocator: TestHash, // locator 1
-
- ExpectLocator1: false,
- ExpectLocator2: true,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Allow default Trash Life time to be used. Thus, the newly created block
- will not be deleted because its Mtime is within the trash life time.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(c *check.C) {
- s.cluster.Collections.BlobTrash = true
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash2,
- Block2: TestBlock2,
-
- CreateData: true,
- CreateInVolume1: true,
-
- UseTrashLifeTime: true,
-
- DeleteLocator: TestHash, // locator 1
-
- // Since trash life time is in effect, block won't be deleted.
- ExpectLocator1: true,
- ExpectLocator2: true,
- }
- s.performTrashWorkerTest(c, testData)
-}
-
-/* Delete a block with matching mtime for locator in both volumes, but EnableDelete is false,
- so block won't be deleted.
-*/
-func (s *HandlerSuite) TestTrashWorkerIntegration_DisabledDelete(c *check.C) {
+func (s *routerSuite) TestTrashList_Clear(c *C) {
s.cluster.Collections.BlobTrash = false
- testData := TrashWorkerTestData{
- Locator1: TestHash,
- Block1: TestBlock,
-
- Locator2: TestHash,
- Block2: TestBlock,
-
- CreateData: true,
-
- DeleteLocator: TestHash,
-
- ExpectLocator1: true,
- ExpectLocator2: true,
- }
- s.performTrashWorkerTest(c, testData)
+ router, cancel := testRouter(c, s.cluster, nil)
+ defer cancel()
+
+ resp := call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte(`
+ [
+ {
+ "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
+ "block_mtime":1707249451308502672,
+ "mount_uuid":"zzzzz-nyw5e-000000000000000"
+ }
+ ]
+ `), nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(router.trasher.todo, DeepEquals, []TrashListItem{{
+ Locator: "acbd18db4cc2f85cedef654fccc4a4d8+3",
+ BlockMtime: 1707249451308502672,
+ MountUUID: "zzzzz-nyw5e-000000000000000",
+ }})
+
+ resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte("[]"), nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ c.Check(router.trasher.todo, HasLen, 0)
}
-/* Perform the test */
-func (s *HandlerSuite) performTrashWorkerTest(c *check.C, testData TrashWorkerTestData) {
- c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
- // Replace the router's trashq -- which the worker goroutines
- // started by setup() are now receiving from -- with a new
- // one, so we can see what the handler sends to it.
- trashq := NewWorkQueue()
- s.handler.Handler.(*router).trashq = trashq
-
- // Put test content
- mounts := s.handler.volmgr.AllWritable()
- if testData.CreateData {
- mounts[0].Put(context.Background(), testData.Locator1, testData.Block1)
- mounts[0].Put(context.Background(), testData.Locator1+".meta", []byte("metadata"))
-
- if testData.CreateInVolume1 {
- mounts[0].Put(context.Background(), testData.Locator2, testData.Block2)
- mounts[0].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
- } else {
- mounts[1].Put(context.Background(), testData.Locator2, testData.Block2)
- mounts[1].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
- }
- }
-
- oldBlockTime := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Minute)
-
- // Create TrashRequest for the test
- trashRequest := TrashRequest{
- Locator: testData.DeleteLocator,
- BlockMtime: oldBlockTime.UnixNano(),
- }
- if testData.SpecifyMountUUID {
- trashRequest.MountUUID = s.handler.volmgr.Mounts()[0].UUID
- }
-
- // Run trash worker and put the trashRequest on trashq
- trashList := list.New()
- trashList.PushBack(trashRequest)
-
- if !testData.UseTrashLifeTime {
- // Trash worker would not delete block if its Mtime is
- // within trash life time. Back-date the block to
- // allow the deletion to succeed.
- for _, mnt := range mounts {
- mnt.Volume.(*MockVolume).Timestamps[testData.DeleteLocator] = oldBlockTime
- if testData.DifferentMtimes {
- oldBlockTime = oldBlockTime.Add(time.Second)
+func (s *routerSuite) TestTrashList_Execute(c *C) {
+ s.cluster.Collections.BlobTrashConcurrency = 1
+ s.cluster.Volumes = map[string]arvados.Volume{
+ "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+ "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+ "zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+ }
+ router, cancel := testRouter(c, s.cluster, nil)
+ defer cancel()
+
+ var mounts []struct {
+ UUID string
+ DeviceID string `json:"device_id"`
+ }
+ resp := call(router, "GET", "http://example/mounts", s.cluster.SystemRootToken, nil, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+ err := json.Unmarshal(resp.Body.Bytes(), &mounts)
+ c.Assert(err, IsNil)
+ c.Assert(mounts, HasLen, 4)
+
+ // Sort mounts by UUID
+ sort.Slice(mounts, func(i, j int) bool {
+ return mounts[i].UUID < mounts[j].UUID
+ })
+
+ // Make vols (stub volumes) in same order as mounts
+ var vols []*stubVolume
+ for _, mount := range mounts {
+ vols = append(vols, router.keepstore.mounts[mount.UUID].volume.(*stubVolume))
+ }
+
+ // The "trial" loop below will construct the trashList which
+ // we'll send to trasher via router, plus a slice of checks
+ // which we'll run after the trasher has finished executing
+ // the list.
+ var trashList []TrashListItem
+ var checks []func()
+
+ tNew := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() / 2)
+ tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+ for _, trial := range []struct {
+ comment string
+ storeMtime []time.Time
+ trashListItems []TrashListItem
+ expectData []bool
+ }{
+ {
+ comment: "timestamp matches, but is not old enough to trash => skip",
+ storeMtime: []time.Time{tNew},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tNew.UnixNano(),
+ MountUUID: mounts[0].UUID,
+ },
+ },
+ expectData: []bool{true},
+ },
+ {
+ comment: "timestamp matches, and is old enough => trash",
+ storeMtime: []time.Time{tOld},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tOld.UnixNano(),
+ MountUUID: mounts[0].UUID,
+ },
+ },
+ expectData: []bool{false},
+ },
+ {
+ comment: "timestamp matches and is old enough on mount 0, but the request specifies mount 1, where timestamp does not match => skip",
+ storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tOld.UnixNano(),
+ MountUUID: mounts[1].UUID,
+ },
+ },
+ expectData: []bool{true, true},
+ },
+ {
+ comment: "MountUUID unspecified => trash from any mount where timestamp matches, leave alone elsewhere",
+ storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tOld.UnixNano(),
+ },
+ },
+ expectData: []bool{false, true},
+ },
+ {
+ comment: "MountUUID unspecified => trash from multiple mounts if timestamp matches, but skip readonly volumes unless AllowTrashWhenReadOnly",
+ storeMtime: []time.Time{tOld, tOld, tOld, tOld},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tOld.UnixNano(),
+ },
+ },
+ expectData: []bool{false, false, true, false},
+ },
+ {
+ comment: "readonly MountUUID specified => skip",
+ storeMtime: []time.Time{tOld, tOld, tOld},
+ trashListItems: []TrashListItem{
+ {
+ BlockMtime: tOld.UnixNano(),
+ MountUUID: mounts[2].UUID,
+ },
+ },
+ expectData: []bool{true, true, true},
+ },
+ } {
+ trial := trial
+ data := []byte(fmt.Sprintf("trial %+v", trial))
+ hash := fmt.Sprintf("%x", md5.Sum(data))
+ for i, t := range trial.storeMtime {
+ if t.IsZero() {
+ continue
}
+ err := vols[i].BlockWrite(context.Background(), hash, data)
+ c.Assert(err, IsNil)
+ err = vols[i].blockTouchWithTime(hash, t)
+ c.Assert(err, IsNil)
}
- }
- go RunTrashWorker(s.handler.volmgr, ctxlog.TestLogger(c), s.cluster, trashq)
-
- // Install gate so all local operations block until we say go
- gate := make(chan struct{})
- for _, mnt := range mounts {
- mnt.Volume.(*MockVolume).Gate = gate
- }
-
- assertStatusItem := func(k string, expect float64) {
- if v := getStatusItem(s.handler, "TrashQueue", k); v != expect {
- c.Errorf("Got %s %v, expected %v", k, v, expect)
- }
- }
-
- assertStatusItem("InProgress", 0)
- assertStatusItem("Queued", 0)
-
- listLen := trashList.Len()
- trashq.ReplaceQueue(trashList)
-
- // Wait for worker to take request(s)
- expectEqualWithin(c, time.Second, listLen, func() interface{} { return trashq.Status().InProgress })
-
- // Ensure status.json also reports work is happening
- assertStatusItem("InProgress", float64(1))
- assertStatusItem("Queued", float64(listLen-1))
-
- // Let worker proceed
- close(gate)
-
- // Wait for worker to finish
- expectEqualWithin(c, time.Second, 0, func() interface{} { return trashq.Status().InProgress })
-
- // Verify Locator1 to be un/deleted as expected
- buf := make([]byte, BlockSize)
- size, err := GetBlock(context.Background(), s.handler.volmgr, testData.Locator1, buf, nil)
- if testData.ExpectLocator1 {
- if size == 0 || err != nil {
- c.Errorf("Expected Locator1 to be still present: %s", testData.Locator1)
+ for _, item := range trial.trashListItems {
+ item.Locator = fmt.Sprintf("%s+%d", hash, len(data))
+ trashList = append(trashList, item)
}
- } else {
- if size > 0 || err == nil {
- c.Errorf("Expected Locator1 to be deleted: %s", testData.Locator1)
+ for i, expect := range trial.expectData {
+ i, expect := i, expect
+ checks = append(checks, func() {
+ ent := vols[i].data[hash]
+ dataPresent := ent.data != nil && ent.trash.IsZero()
+ c.Check(dataPresent, Equals, expect, Commentf("%s mount %d (%s) expect present=%v but got len(ent.data)=%d ent.trash=%v // %s\nlog:\n%s", hash, i, vols[i].params.UUID, expect, len(ent.data), !ent.trash.IsZero(), trial.comment, vols[i].stubLog.String()))
+ })
}
}
- // Verify Locator2 to be un/deleted as expected
- if testData.Locator1 != testData.Locator2 {
- size, err = GetBlock(context.Background(), s.handler.volmgr, testData.Locator2, buf, nil)
- if testData.ExpectLocator2 {
- if size == 0 || err != nil {
- c.Errorf("Expected Locator2 to be still present: %s", testData.Locator2)
- }
- } else {
- if size > 0 || err == nil {
- c.Errorf("Expected Locator2 to be deleted: %s", testData.Locator2)
- }
+ listjson, err := json.Marshal(trashList)
+ resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, listjson, nil)
+ c.Check(resp.Code, Equals, http.StatusOK)
+
+ for {
+ router.trasher.cond.L.Lock()
+ todolen := len(router.trasher.todo)
+ router.trasher.cond.L.Unlock()
+ if todolen == 0 && router.trasher.inprogress.Load() == 0 {
+ break
}
+ time.Sleep(time.Millisecond)
}
- // The DifferentMtimes test puts the same locator in two
- // different volumes, but only one copy has an Mtime matching
- // the trash request.
- if testData.DifferentMtimes {
- locatorFoundIn := 0
- for _, volume := range s.handler.volmgr.AllReadable() {
- buf := make([]byte, BlockSize)
- if _, err := volume.Get(context.Background(), testData.Locator1, buf); err == nil {
- locatorFoundIn = locatorFoundIn + 1
- }
- }
- c.Check(locatorFoundIn, check.Equals, 1)
+ for _, check := range checks {
+ check()
}
}
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index dd62cf1319..92cf12ac18 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -28,20 +28,27 @@ import (
)
func init() {
- driver["Directory"] = newDirectoryVolume
+ driver["Directory"] = newUnixVolume
}
-func newDirectoryVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &UnixVolume{cluster: cluster, volume: volume, logger: logger, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
+func newUnixVolume(params newVolumeParams) (volume, error) {
+ v := &unixVolume{
+ uuid: params.UUID,
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
+ logger: params.Logger,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
+ }
+ err := json.Unmarshal(params.ConfigVolume.DriverParameters, &v)
if err != nil {
return nil, err
}
- v.logger = v.logger.WithField("Volume", v.String())
+ v.logger = v.logger.WithField("Volume", v.DeviceID())
return v, v.check()
}
-func (v *UnixVolume) check() error {
+func (v *unixVolume) check() error {
if v.Root == "" {
return errors.New("DriverParameters.Root was not provided")
}
@@ -53,22 +60,24 @@ func (v *UnixVolume) check() error {
}
// Set up prometheus metrics
- lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+ lbls := prometheus.Labels{"device_id": v.DeviceID()}
v.os.stats.opsCounters, v.os.stats.errCounters, v.os.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
_, err := v.os.Stat(v.Root)
return err
}
-// A UnixVolume stores and retrieves blocks in a local directory.
-type UnixVolume struct {
+// A unixVolume stores and retrieves blocks in a local directory.
+type unixVolume struct {
Root string // path to the volume's root directory
Serialize bool
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
+ uuid string
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ bufferPool *bufferPool
// something to lock during IO, typically a sync.Mutex (or nil
// to skip locking)
@@ -77,15 +86,16 @@ type UnixVolume struct {
os osWithStats
}
-// GetDeviceID returns a globally unique ID for the volume's root
+// DeviceID returns a globally unique ID for the volume's root
// directory, consisting of the filesystem's UUID and the path from
// filesystem root to storage directory, joined by "/". For example,
// the device ID for a local directory "/mnt/xvda1/keep" might be
// "fa0b6166-3b55-4994-bd3f-92f4e00a1bb0/keep".
-func (v *UnixVolume) GetDeviceID() string {
+func (v *unixVolume) DeviceID() string {
giveup := func(f string, args ...interface{}) string {
- v.logger.Infof(f+"; using blank DeviceID for volume %s", append(args, v)...)
- return ""
+ v.logger.Infof(f+"; using hostname:path for volume %s", append(args, v.uuid)...)
+ host, _ := os.Hostname()
+ return host + ":" + v.Root
}
buf, err := exec.Command("findmnt", "--noheadings", "--target", v.Root).CombinedOutput()
if err != nil {
@@ -154,12 +164,9 @@ func (v *UnixVolume) GetDeviceID() string {
return giveup("could not find entry in %q matching %q", udir, dev)
}
-// Touch sets the timestamp for the given locator to the current time
-func (v *UnixVolume) Touch(loc string) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- p := v.blockPath(loc)
+// BlockTouch sets the timestamp for the given locator to the current time.
+func (v *unixVolume) BlockTouch(hash string) error {
+ p := v.blockPath(hash)
f, err := v.os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
@@ -182,7 +189,7 @@ func (v *UnixVolume) Touch(loc string) error {
}
// Mtime returns the stored timestamp for the given locator.
-func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
+func (v *unixVolume) Mtime(loc string) (time.Time, error) {
p := v.blockPath(loc)
fi, err := v.os.Stat(p)
if err != nil {
@@ -191,94 +198,59 @@ func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
return fi.ModTime(), nil
}
-// Lock the locker (if one is in use), open the file for reading, and
-// call the given function if and when the file is ready to read.
-func (v *UnixVolume) getFunc(ctx context.Context, path string, fn func(io.Reader) error) error {
- if err := v.lock(ctx); err != nil {
- return err
- }
- defer v.unlock()
- f, err := v.os.Open(path)
- if err != nil {
- return err
- }
- defer f.Close()
- return fn(NewCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes))
-}
-
// stat is os.Stat() with some extra sanity checks.
-func (v *UnixVolume) stat(path string) (os.FileInfo, error) {
+func (v *unixVolume) stat(path string) (os.FileInfo, error) {
stat, err := v.os.Stat(path)
if err == nil {
if stat.Size() < 0 {
err = os.ErrInvalid
} else if stat.Size() > BlockSize {
- err = TooLongError
+ err = errTooLarge
}
}
return stat, err
}
-// Get retrieves a block, copies it to the given slice, and returns
-// the number of bytes copied.
-func (v *UnixVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- return getWithPipe(ctx, loc, buf, v)
-}
-
-// ReadBlock implements BlockReader.
-func (v *UnixVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
- path := v.blockPath(loc)
+// BlockRead reads a block from the volume.
+func (v *unixVolume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+ path := v.blockPath(hash)
stat, err := v.stat(path)
if err != nil {
return v.translateError(err)
}
- return v.getFunc(ctx, path, func(rdr io.Reader) error {
- n, err := io.Copy(w, rdr)
- if err == nil && n != stat.Size() {
- err = io.ErrUnexpectedEOF
- }
+ if err := v.lock(ctx); err != nil {
return err
- })
-}
-
-// Compare returns nil if Get(loc) would return the same content as
-// expect. It is functionally equivalent to Get() followed by
-// bytes.Compare(), but uses less memory.
-func (v *UnixVolume) Compare(ctx context.Context, loc string, expect []byte) error {
- path := v.blockPath(loc)
- if _, err := v.stat(path); err != nil {
- return v.translateError(err)
}
- return v.getFunc(ctx, path, func(rdr io.Reader) error {
- return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
- })
-}
-
-// Put stores a block of data identified by the locator string
-// "loc". It returns nil on success. If the volume is full, it
-// returns a FullError. If the write fails due to some other error,
-// that error is returned.
-func (v *UnixVolume) Put(ctx context.Context, loc string, block []byte) error {
- return putWithPipe(ctx, loc, block, v)
+ defer v.unlock()
+ f, err := v.os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ src := newCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes)
+ dst := io.NewOffsetWriter(w, 0)
+ n, err := io.Copy(dst, src)
+ if err == nil && n != stat.Size() {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
}
-// WriteBlock implements BlockWriter.
-func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
+// BlockWrite stores a block on the volume. If it already exists, its
+// timestamp is updated.
+func (v *unixVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+ if v.isFull() {
+ return errFull
}
- if v.IsFull() {
- return FullError
- }
- bdir := v.blockDir(loc)
+ bdir := v.blockDir(hash)
if err := os.MkdirAll(bdir, 0755); err != nil {
return fmt.Errorf("error creating directory %s: %s", bdir, err)
}
- bpath := v.blockPath(loc)
- tmpfile, err := v.os.TempFile(bdir, "tmp"+loc)
+ bpath := v.blockPath(hash)
+ tmpfile, err := v.os.TempFile(bdir, "tmp"+hash)
if err != nil {
- return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, err)
+ return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, hash, err)
}
defer v.os.Remove(tmpfile.Name())
defer tmpfile.Close()
@@ -287,7 +259,7 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
return err
}
defer v.unlock()
- n, err := io.Copy(tmpfile, rdr)
+ n, err := tmpfile.Write(data)
v.os.stats.TickOutBytes(uint64(n))
if err != nil {
return fmt.Errorf("error writing %s: %s", bpath, err)
@@ -312,58 +284,10 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
return nil
}
-// Status returns a VolumeStatus struct describing the volume's
-// current state, or nil if an error occurs.
-//
-func (v *UnixVolume) Status() *VolumeStatus {
- fi, err := v.os.Stat(v.Root)
- if err != nil {
- v.logger.WithError(err).Error("stat failed")
- return nil
- }
- // uint64() cast here supports GOOS=darwin where Dev is
- // int32. If the device number is negative, the unsigned
- // devnum won't be the real device number any more, but that's
- // fine -- all we care about is getting the same number each
- // time.
- devnum := uint64(fi.Sys().(*syscall.Stat_t).Dev)
-
- var fs syscall.Statfs_t
- if err := syscall.Statfs(v.Root, &fs); err != nil {
- v.logger.WithError(err).Error("statfs failed")
- return nil
- }
- // These calculations match the way df calculates disk usage:
- // "free" space is measured by fs.Bavail, but "used" space
- // uses fs.Blocks - fs.Bfree.
- free := fs.Bavail * uint64(fs.Bsize)
- used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
- return &VolumeStatus{
- MountPoint: v.Root,
- DeviceNum: devnum,
- BytesFree: free,
- BytesUsed: used,
- }
-}
-
var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)
var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
-// IndexTo writes (to the given Writer) a list of blocks found on this
-// volume which begin with the specified prefix. If the prefix is an
-// empty string, IndexTo writes a complete list of blocks.
-//
-// Each block is given in the format
-//
-// locator+size modification-time {newline}
-//
-// e.g.:
-//
-// e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
-// e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
-// e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
-//
-func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
+func (v *unixVolume) Index(ctx context.Context, prefix string, w io.Writer) error {
rootdir, err := v.os.Open(v.Root)
if err != nil {
return err
@@ -376,6 +300,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
return err
}
for _, subdir := range subdirs {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
if !strings.HasPrefix(subdir, prefix) && !strings.HasPrefix(prefix, subdir) {
// prefix excludes all blocks stored in this dir
continue
@@ -390,7 +317,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
v.os.stats.TickOps("readdir")
v.os.stats.Tick(&v.os.stats.ReaddirOps)
dirents, err = os.ReadDir(blockdirpath)
- if err == nil {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ } else if err == nil {
break
} else if attempt < 5 && strings.Contains(err.Error(), "errno 523") {
// EBADCOOKIE (NFS stopped accepting
@@ -404,6 +333,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
}
for _, dirent := range dirents {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
fileInfo, err := dirent.Info()
if os.IsNotExist(err) {
// File disappeared between ReadDir() and now
@@ -432,11 +364,11 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
return nil
}
-// Trash trashes the block data from the unix storage
-// If BlobTrashLifetime == 0, the block is deleted
-// Else, the block is renamed as path/{loc}.trash.{deadline},
-// where deadline = now + BlobTrashLifetime
-func (v *UnixVolume) Trash(loc string) error {
+// BlockTrash trashes the block data from the unix storage. If
+// BlobTrashLifetime == 0, the block is deleted; otherwise, the block
+// is renamed as path/{loc}.trash.{deadline}, where deadline = now +
+// BlobTrashLifetime.
+func (v *unixVolume) BlockTrash(loc string) error {
// Touch() must be called before calling Write() on a block. Touch()
// also uses lockfile(). This avoids a race condition between Write()
// and Trash() because either (a) the file will be trashed and Touch()
@@ -444,10 +376,6 @@ func (v *UnixVolume) Trash(loc string) error {
// be re-written), or (b) Touch() will update the file's timestamp and
// Trash() will read the correct up-to-date timestamp and choose not to
// trash the file.
-
- if v.volume.ReadOnly || !v.cluster.Collections.BlobTrash {
- return MethodDisabledError
- }
if err := v.lock(context.TODO()); err != nil {
return err
}
@@ -480,17 +408,13 @@ func (v *UnixVolume) Trash(loc string) error {
return v.os.Rename(p, fmt.Sprintf("%v.trash.%d", p, time.Now().Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Unix()))
}
-// Untrash moves block from trash back into store
+// BlockUntrash moves a block from the trash back into the store.
// Look for path/{loc}.trash.{deadline} in storage,
// and rename the first such file as path/{loc}
-func (v *UnixVolume) Untrash(loc string) (err error) {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
-
+func (v *unixVolume) BlockUntrash(hash string) error {
v.os.stats.TickOps("readdir")
v.os.stats.Tick(&v.os.stats.ReaddirOps)
- files, err := ioutil.ReadDir(v.blockDir(loc))
+ files, err := ioutil.ReadDir(v.blockDir(hash))
if err != nil {
return err
}
@@ -500,11 +424,11 @@ func (v *UnixVolume) Untrash(loc string) (err error) {
}
foundTrash := false
- prefix := fmt.Sprintf("%v.trash.", loc)
+ prefix := fmt.Sprintf("%v.trash.", hash)
for _, f := range files {
if strings.HasPrefix(f.Name(), prefix) {
foundTrash = true
- err = v.os.Rename(v.blockPath(f.Name()), v.blockPath(loc))
+ err = v.os.Rename(v.blockPath(f.Name()), v.blockPath(hash))
if err == nil {
break
}
@@ -515,25 +439,24 @@ func (v *UnixVolume) Untrash(loc string) (err error) {
return os.ErrNotExist
}
- return
+ return nil
}
// blockDir returns the fully qualified directory name for the directory
// where loc is (or would be) stored on this volume.
-func (v *UnixVolume) blockDir(loc string) string {
+func (v *unixVolume) blockDir(loc string) string {
return filepath.Join(v.Root, loc[0:3])
}
// blockPath returns the fully qualified pathname for the path to loc
// on this volume.
-func (v *UnixVolume) blockPath(loc string) string {
+func (v *unixVolume) blockPath(loc string) string {
return filepath.Join(v.blockDir(loc), loc)
}
-// IsFull returns true if the free space on the volume is less than
+// isFull returns true if the free space on the volume is less than
// MinFreeKilobytes.
-//
-func (v *UnixVolume) IsFull() (isFull bool) {
+func (v *unixVolume) isFull() (isFull bool) {
fullSymlink := v.Root + "/full"
// Check if the volume has been marked as full in the last hour.
@@ -547,9 +470,9 @@ func (v *UnixVolume) IsFull() (isFull bool) {
}
if avail, err := v.FreeDiskSpace(); err == nil {
- isFull = avail < MinFreeKilobytes
+ isFull = avail < BlockSize
} else {
- v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v)
+ v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v.DeviceID())
isFull = false
}
@@ -563,31 +486,26 @@ func (v *UnixVolume) IsFull() (isFull bool) {
// FreeDiskSpace returns the number of unused 1k blocks available on
// the volume.
-//
-func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
+func (v *unixVolume) FreeDiskSpace() (free uint64, err error) {
var fs syscall.Statfs_t
err = syscall.Statfs(v.Root, &fs)
if err == nil {
// Statfs output is not guaranteed to measure free
// space in terms of 1K blocks.
- free = fs.Bavail * uint64(fs.Bsize) / 1024
+ free = fs.Bavail * uint64(fs.Bsize)
}
return
}
-func (v *UnixVolume) String() string {
- return fmt.Sprintf("[UnixVolume %s]", v.Root)
-}
-
// InternalStats returns I/O and filesystem ops counters.
-func (v *UnixVolume) InternalStats() interface{} {
+func (v *unixVolume) InternalStats() interface{} {
return &v.os.stats
}
// lock acquires the serialize lock, if one is in use. If ctx is done
// before the lock is acquired, lock returns ctx.Err() instead of
// acquiring the lock.
-func (v *UnixVolume) lock(ctx context.Context) error {
+func (v *unixVolume) lock(ctx context.Context) error {
if v.locker == nil {
return nil
}
@@ -611,7 +529,7 @@ func (v *UnixVolume) lock(ctx context.Context) error {
}
// unlock releases the serialize lock, if one is in use.
-func (v *UnixVolume) unlock() {
+func (v *unixVolume) unlock() {
if v.locker == nil {
return
}
@@ -619,7 +537,7 @@ func (v *UnixVolume) unlock() {
}
// lockfile and unlockfile use flock(2) to manage kernel file locks.
-func (v *UnixVolume) lockfile(f *os.File) error {
+func (v *unixVolume) lockfile(f *os.File) error {
v.os.stats.TickOps("flock")
v.os.stats.Tick(&v.os.stats.FlockOps)
err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
@@ -627,7 +545,7 @@ func (v *UnixVolume) lockfile(f *os.File) error {
return err
}
-func (v *UnixVolume) unlockfile(f *os.File) error {
+func (v *unixVolume) unlockfile(f *os.File) error {
err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
v.os.stats.TickErr(err)
return err
@@ -635,7 +553,7 @@ func (v *UnixVolume) unlockfile(f *os.File) error {
// Where appropriate, translate a more specific filesystem error to an
// error recognized by handlers, like os.ErrNotExist.
-func (v *UnixVolume) translateError(err error) error {
+func (v *unixVolume) translateError(err error) error {
switch err.(type) {
case *os.PathError:
// stat() returns a PathError if the parent directory
@@ -650,11 +568,7 @@ var unixTrashLocRegexp = regexp.MustCompile(`/([0-9a-f]{32})\.trash\.(\d+)$`)
// EmptyTrash walks hierarchy looking for {hash}.trash.*
// and deletes those with deadline < now.
-func (v *UnixVolume) EmptyTrash() {
- if v.cluster.Collections.BlobDeleteConcurrency < 1 {
- return
- }
-
+func (v *unixVolume) EmptyTrash() {
var bytesDeleted, bytesInTrash int64
var blocksDeleted, blocksInTrash int64
diff --git a/services/keepstore/unix_volume_test.go b/services/keepstore/unix_volume_test.go
index 75d9b22de5..bcdb5f6358 100644
--- a/services/keepstore/unix_volume_test.go
+++ b/services/keepstore/unix_volume_test.go
@@ -8,91 +8,82 @@ import (
"bytes"
"context"
"encoding/json"
- "errors"
"fmt"
- "io"
"io/ioutil"
"os"
"sync"
"syscall"
"time"
- "git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
-type TestableUnixVolume struct {
- UnixVolume
+type testableUnixVolume struct {
+ unixVolume
t TB
}
-// PutRaw writes a Keep block directly into a UnixVolume, even if
-// the volume is readonly.
-func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
- defer func(orig bool) {
- v.volume.ReadOnly = orig
- }(v.volume.ReadOnly)
- v.volume.ReadOnly = false
- err := v.Put(context.Background(), locator, data)
+func (v *testableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
+ err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{Actime: lastPut.Unix(), Modtime: lastPut.Unix()})
if err != nil {
v.t.Fatal(err)
}
}
-func (v *TestableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
- err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{lastPut.Unix(), lastPut.Unix()})
- if err != nil {
- v.t.Fatal(err)
- }
-}
-
-func (v *TestableUnixVolume) Teardown() {
+func (v *testableUnixVolume) Teardown() {
if err := os.RemoveAll(v.Root); err != nil {
v.t.Error(err)
}
}
-func (v *TestableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
return "open", "create"
}
-var _ = check.Suite(&UnixVolumeSuite{})
+var _ = check.Suite(&unixVolumeSuite{})
-type UnixVolumeSuite struct {
- cluster *arvados.Cluster
- volumes []*TestableUnixVolume
- metrics *volumeMetricsVecs
+type unixVolumeSuite struct {
+ params newVolumeParams
+ volumes []*testableUnixVolume
}
-func (s *UnixVolumeSuite) SetUpTest(c *check.C) {
- s.cluster = testCluster(c)
- s.metrics = newVolumeMetricsVecs(prometheus.NewRegistry())
+func (s *unixVolumeSuite) SetUpTest(c *check.C) {
+ logger := ctxlog.TestLogger(c)
+ reg := prometheus.NewRegistry()
+ s.params = newVolumeParams{
+ UUID: "zzzzz-nyw5e-999999999999999",
+ Cluster: testCluster(c),
+ Logger: logger,
+ MetricsVecs: newVolumeMetricsVecs(reg),
+ BufferPool: newBufferPool(logger, 8, reg),
+ }
}
-func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
+func (s *unixVolumeSuite) TearDownTest(c *check.C) {
for _, v := range s.volumes {
v.Teardown()
}
}
-func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, serialize bool) *TestableUnixVolume {
+func (s *unixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumeParams, serialize bool) *testableUnixVolume {
d, err := ioutil.TempDir("", "volume_test")
c.Check(err, check.IsNil)
var locker sync.Locker
if serialize {
locker = &sync.Mutex{}
}
- v := &TestableUnixVolume{
- UnixVolume: UnixVolume{
- Root: d,
- locker: locker,
- cluster: cluster,
- logger: ctxlog.TestLogger(c),
- volume: volume,
- metrics: metrics,
+ v := &testableUnixVolume{
+ unixVolume: unixVolume{
+ Root: d,
+ locker: locker,
+ uuid: params.UUID,
+ cluster: params.Cluster,
+ logger: params.Logger,
+ volume: params.ConfigVolume,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
},
t: c,
}
@@ -101,56 +92,45 @@ func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Clu
return v
}
-// serialize = false; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableUnixVolume(c, params, false)
})
}
-// serialize = false; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsReadOnly(c *check.C) {
- DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_ReadOnly(c *check.C) {
+ DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableUnixVolume(c, params, false)
})
}
-// serialize = true; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsSerialized(c *check.C) {
- DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Serialized(c *check.C) {
+ DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableUnixVolume(c, params, true)
})
}
-// serialize = true; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeHandlersWithGenericVolumeTests(c *check.C) {
- DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
- return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Readonly_Serialized(c *check.C) {
+ DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+ return s.newTestableUnixVolume(c, params, true)
})
}
-func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestGetNotFound(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, true)
defer v.Teardown()
- v.Put(context.Background(), TestHash, TestBlock)
-
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash2, buf)
- switch {
- case os.IsNotExist(err):
- break
- case err == nil:
- c.Errorf("Read should have failed, returned %+q", buf[:n])
- default:
- c.Errorf("Read expected ErrNotExist, got: %s", err)
- }
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
+
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash2, buf)
+ c.Check(err, check.FitsTypeOf, os.ErrNotExist)
}
-func (s *UnixVolumeSuite) TestPut(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestPut(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, false)
defer v.Teardown()
- err := v.Put(context.Background(), TestHash, TestBlock)
+ err := v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
c.Error(err)
}
@@ -163,235 +143,85 @@ func (s *UnixVolumeSuite) TestPut(c *check.C) {
}
}
-func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestPutBadVolume(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, false)
defer v.Teardown()
err := os.RemoveAll(v.Root)
c.Assert(err, check.IsNil)
- err = v.Put(context.Background(), TestHash, TestBlock)
+ err = v.BlockWrite(context.Background(), TestHash, TestBlock)
c.Check(err, check.IsNil)
}
-func (s *UnixVolumeSuite) TestUnixVolumeReadonly(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{ReadOnly: true, Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- v.PutRaw(TestHash, TestBlock)
-
- buf := make([]byte, BlockSize)
- _, err := v.Get(context.Background(), TestHash, buf)
- if err != nil {
- c.Errorf("got err %v, expected nil", err)
- }
-
- err = v.Put(context.Background(), TestHash, TestBlock)
- if err != MethodDisabledError {
- c.Errorf("got err %v, expected MethodDisabledError", err)
- }
-
- err = v.Touch(TestHash)
- if err != MethodDisabledError {
- c.Errorf("got err %v, expected MethodDisabledError", err)
- }
-
- err = v.Trash(TestHash)
- if err != MethodDisabledError {
- c.Errorf("got err %v, expected MethodDisabledError", err)
- }
-}
-
-func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestIsFull(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, false)
defer v.Teardown()
fullPath := v.Root + "/full"
now := fmt.Sprintf("%d", time.Now().Unix())
os.Symlink(now, fullPath)
- if !v.IsFull() {
- c.Errorf("%s: claims not to be full", v)
+ if !v.isFull() {
+ c.Error("volume claims not to be full")
}
os.Remove(fullPath)
// Test with an expired /full link.
expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
os.Symlink(expired, fullPath)
- if v.IsFull() {
- c.Errorf("%s: should no longer be full", v)
+ if v.isFull() {
+ c.Error("volume should no longer be full")
}
}
-func (s *UnixVolumeSuite) TestNodeStatus(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- // Get node status and make a basic sanity check.
- volinfo := v.Status()
- if volinfo.MountPoint != v.Root {
- c.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.Root)
- }
- if volinfo.DeviceNum == 0 {
- c.Errorf("uninitialized device_num in %v", volinfo)
- }
- if volinfo.BytesFree == 0 {
- c.Errorf("uninitialized bytes_free in %v", volinfo)
- }
- if volinfo.BytesUsed == 0 {
- c.Errorf("uninitialized bytes_used in %v", volinfo)
- }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- v.Put(context.Background(), TestHash, TestBlock)
- mockErr := errors.New("Mock error")
- err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
- return mockErr
- })
- if err != mockErr {
- c.Errorf("Got %v, expected %v", err, mockErr)
- }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- funcCalled := false
- err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
- funcCalled = true
- return nil
- })
- if err == nil {
- c.Errorf("Expected error opening non-existent file")
- }
- if funcCalled {
- c.Errorf("Worker func should not have been called")
- }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- v.Put(context.Background(), TestHash, TestBlock)
-
- mtx := NewMockMutex()
- v.locker = mtx
-
- funcCalled := make(chan struct{})
- go v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
- funcCalled <- struct{}{}
- return nil
- })
- select {
- case mtx.AllowLock <- struct{}{}:
- case <-funcCalled:
- c.Fatal("Function was called before mutex was acquired")
- case <-time.After(5 * time.Second):
- c.Fatal("Timed out before mutex was acquired")
- }
- select {
- case <-funcCalled:
- case mtx.AllowUnlock <- struct{}{}:
- c.Fatal("Mutex was released before function was called")
- case <-time.After(5 * time.Second):
- c.Fatal("Timed out waiting for funcCalled")
- }
- select {
- case mtx.AllowUnlock <- struct{}{}:
- case <-time.After(5 * time.Second):
- c.Fatal("Timed out waiting for getFunc() to release mutex")
- }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeCompare(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
- defer v.Teardown()
-
- v.Put(context.Background(), TestHash, TestBlock)
- err := v.Compare(context.Background(), TestHash, TestBlock)
- if err != nil {
- c.Errorf("Got err %q, expected nil", err)
- }
-
- err = v.Compare(context.Background(), TestHash, []byte("baddata"))
- if err != CollisionError {
- c.Errorf("Got err %q, expected %q", err, CollisionError)
- }
-
- v.Put(context.Background(), TestHash, []byte("baddata"))
- err = v.Compare(context.Background(), TestHash, TestBlock)
- if err != DiskHashError {
- c.Errorf("Got err %q, expected %q", err, DiskHashError)
- }
-
- if os.Getuid() == 0 {
- c.Log("skipping 'permission denied' check when running as root")
- } else {
- p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
- err = os.Chmod(p, 000)
- c.Assert(err, check.IsNil)
- err = v.Compare(context.Background(), TestHash, TestBlock)
- c.Check(err, check.ErrorMatches, ".*permission denied.*")
- }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelPut(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, true)
defer v.Teardown()
v.locker.Lock()
+ defer v.locker.Unlock()
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(50 * time.Millisecond)
cancel()
- time.Sleep(50 * time.Millisecond)
- v.locker.Unlock()
}()
- err := v.Put(ctx, TestHash, TestBlock)
+ err := v.BlockWrite(ctx, TestHash, TestBlock)
if err != context.Canceled {
- c.Errorf("Put() returned %s -- expected short read / canceled", err)
+ c.Errorf("BlockWrite() returned %s -- expected short read / canceled", err)
}
}
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelGet(c *check.C) {
- v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
+ v := s.newTestableUnixVolume(c, s.params, true)
defer v.Teardown()
- bpath := v.blockPath(TestHash)
- v.PutRaw(TestHash, TestBlock)
- os.Remove(bpath)
- err := syscall.Mkfifo(bpath, 0600)
+ err := v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
- c.Fatalf("Mkfifo %s: %s", bpath, err)
+ c.Fatal(err)
}
- defer os.Remove(bpath)
ctx, cancel := context.WithCancel(context.Background())
+ v.locker.Lock()
+ defer v.locker.Unlock()
go func() {
time.Sleep(50 * time.Millisecond)
cancel()
}()
- buf := make([]byte, len(TestBlock))
- n, err := v.Get(ctx, TestHash, buf)
- if n == len(TestBlock) || err != context.Canceled {
- c.Errorf("Get() returned %d, %s -- expected short read / canceled", n, err)
+ buf := &brbuffer{}
+ err = v.BlockRead(ctx, TestHash, buf)
+ if buf.Len() != 0 || err != context.Canceled {
+ c.Errorf("BlockRead() returned %q, %s -- expected short read / canceled", buf.String(), err)
}
}
-func (s *UnixVolumeSuite) TestStats(c *check.C) {
- vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestStats(c *check.C) {
+ vol := s.newTestableUnixVolume(c, s.params, false)
stats := func() string {
buf, err := json.Marshal(vol.InternalStats())
c.Check(err, check.IsNil)
return string(buf)
}
- c.Check(stats(), check.Matches, `.*"StatOps":1,.*`) // (*UnixVolume)check() calls Stat() once
+ c.Check(stats(), check.Matches, `.*"StatOps":1,.*`) // (*unixVolume)check() calls Stat() once
c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
- loc := "acbd18db4cc2f85cedef654fccc4a4d8"
- _, err := vol.Get(context.Background(), loc, make([]byte, 3))
+ err := vol.BlockRead(context.Background(), fooHash, brdiscard)
c.Check(err, check.NotNil)
c.Check(stats(), check.Matches, `.*"StatOps":[^0],.*`)
c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
@@ -400,42 +230,42 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
c.Check(stats(), check.Matches, `.*"CreateOps":0,.*`)
- err = vol.Put(context.Background(), loc, []byte("foo"))
+ err = vol.BlockWrite(context.Background(), fooHash, []byte("foo"))
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
- err = vol.Touch(loc)
+ err = vol.BlockTouch(fooHash)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"FlockOps":1,.*`)
c.Check(stats(), check.Matches, `.*"OpenOps":1,.*`)
c.Check(stats(), check.Matches, `.*"UtimesOps":2,.*`)
- _, err = vol.Get(context.Background(), loc, make([]byte, 3))
- c.Check(err, check.IsNil)
- err = vol.Compare(context.Background(), loc, []byte("foo"))
+ buf := &brbuffer{}
+ err = vol.BlockRead(context.Background(), fooHash, buf)
c.Check(err, check.IsNil)
- c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
- c.Check(stats(), check.Matches, `.*"OpenOps":3,.*`)
+ c.Check(buf.String(), check.Equals, "foo")
+ c.Check(stats(), check.Matches, `.*"InBytes":3,.*`)
+ c.Check(stats(), check.Matches, `.*"OpenOps":2,.*`)
- err = vol.Trash(loc)
+ err = vol.BlockTrash(fooHash)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
}
-func (s *UnixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
- vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
+ vol := s.newTestableUnixVolume(c, s.params, false)
- err := os.Mkdir(vol.UnixVolume.Root+"/aaa", 0777)
+ err := os.Mkdir(vol.unixVolume.Root+"/aaa", 0777)
c.Assert(err, check.IsNil)
- err = os.Mkdir(vol.UnixVolume.Root+"/.aaa", 0777) // EmptyTrash should not look here
+ err = os.Mkdir(vol.unixVolume.Root+"/.aaa", 0777) // EmptyTrash should not look here
c.Assert(err, check.IsNil)
- deleteme := vol.UnixVolume.Root + "/aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
+ deleteme := vol.unixVolume.Root + "/aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
err = ioutil.WriteFile(deleteme, []byte{1, 2, 3}, 0777)
c.Assert(err, check.IsNil)
- skipme := vol.UnixVolume.Root + "/.aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
+ skipme := vol.unixVolume.Root + "/.aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
err = ioutil.WriteFile(skipme, []byte{1, 2, 3}, 0777)
c.Assert(err, check.IsNil)
vol.EmptyTrash()
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index c3b8cd6283..cd61804913 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -6,426 +6,93 @@ package keepstore
import (
"context"
- "crypto/rand"
- "fmt"
"io"
- "math/big"
- "sort"
- "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/sirupsen/logrus"
)
-type BlockWriter interface {
- // WriteBlock reads all data from r, writes it to a backing
- // store as "loc", and returns the number of bytes written.
- WriteBlock(ctx context.Context, loc string, r io.Reader) error
-}
-
-type BlockReader interface {
- // ReadBlock retrieves data previously stored as "loc" and
- // writes it to w.
- ReadBlock(ctx context.Context, loc string, w io.Writer) error
-}
-
-var driver = map[string]func(*arvados.Cluster, arvados.Volume, logrus.FieldLogger, *volumeMetricsVecs) (Volume, error){}
-
-// A Volume is an interface representing a Keep back-end storage unit:
-// for example, a single mounted disk, a RAID array, an Amazon S3 volume,
-// etc.
-type Volume interface {
- // Get a block: copy the block data into buf, and return the
- // number of bytes copied.
- //
- // loc is guaranteed to consist of 32 or more lowercase hex
- // digits.
- //
- // Get should not verify the integrity of the data: it should
- // just return whatever was found in its backing
- // store. (Integrity checking is the caller's responsibility.)
- //
- // If an error is encountered that prevents it from
- // retrieving the data, that error should be returned so the
- // caller can log (and send to the client) a more useful
- // message.
- //
- // If the error is "not found", and there's no particular
- // reason to expect the block to be found (other than that a
- // caller is asking for it), the returned error should satisfy
- // os.IsNotExist(err): this is a normal condition and will not
- // be logged as an error (except that a 404 will appear in the
- // access log if the block is not found on any other volumes
- // either).
- //
- // If the data in the backing store is bigger than len(buf),
- // then Get is permitted to return an error without reading
- // any of the data.
- //
- // len(buf) will not exceed BlockSize.
- Get(ctx context.Context, loc string, buf []byte) (int, error)
-
- // Compare the given data with the stored data (i.e., what Get
- // would return). If equal, return nil. If not, return
- // CollisionError or DiskHashError (depending on whether the
- // data on disk matches the expected hash), or whatever error
- // was encountered opening/reading the stored data.
- Compare(ctx context.Context, loc string, data []byte) error
-
- // Put writes a block to an underlying storage device.
- //
- // loc is as described in Get.
- //
- // len(block) is guaranteed to be between 0 and BlockSize.
- //
- // If a block is already stored under the same name (loc) with
- // different content, Put must either overwrite the existing
- // data with the new data or return a non-nil error. When
- // overwriting existing data, it must never leave the storage
- // device in an inconsistent state: a subsequent call to Get
- // must return either the entire old block, the entire new
- // block, or an error. (An implementation that cannot peform
- // atomic updates must leave the old data alone and return an
- // error.)
- //
- // Put also sets the timestamp for the given locator to the
- // current time.
- //
- // Put must return a non-nil error unless it can guarantee
- // that the entire block has been written and flushed to
- // persistent storage, and that its timestamp is current. Of
- // course, this guarantee is only as good as the underlying
- // storage device, but it is Put's responsibility to at least
- // get whatever guarantee is offered by the storage device.
+// volume is the interface to a back-end storage device.
+type volume interface {
+ // Return a unique identifier for the backend device. If
+ // possible, this should be chosen such that keepstore
+ // processes running on different hosts, and accessing the
+ // same backend device, will return the same string.
//
- // Put should not verify that loc==hash(block): this is the
- // caller's responsibility.
- Put(ctx context.Context, loc string, block []byte) error
+ // This helps keep-balance avoid redundantly downloading
+ // multiple index listings for the same backend device.
+ DeviceID() string
- // Touch sets the timestamp for the given locator to the
- // current time.
- //
- // loc is as described in Get.
- //
- // If invoked at time t0, Touch must guarantee that a
- // subsequent call to Mtime will return a timestamp no older
- // than {t0 minus one second}. For example, if Touch is called
- // at 2015-07-07T01:23:45.67890123Z, it is acceptable for a
- // subsequent Mtime to return any of the following:
+ // Copy a block from the backend device to writeTo.
//
- // - 2015-07-07T01:23:45.00000000Z
- // - 2015-07-07T01:23:45.67890123Z
- // - 2015-07-07T01:23:46.67890123Z
- // - 2015-07-08T00:00:00.00000000Z
+ // As with all volume methods, the hash argument is a
+ // 32-character hexadecimal string.
//
- // It is not acceptable for a subsequente Mtime to return
- // either of the following:
+ // Data can be written to writeTo in any order, and concurrent
+ // calls to writeTo.WriteAt() are allowed. However, BlockRead
+ // must not do multiple writes that intersect with any given
+ // byte offset.
//
- // - 2015-07-07T00:00:00.00000000Z -- ERROR
- // - 2015-07-07T01:23:44.00000000Z -- ERROR
+ // BlockRead is not expected to verify data integrity.
//
- // Touch must return a non-nil error if the timestamp cannot
- // be updated.
- Touch(loc string) error
+ // If the indicated block does not exist, or has been trashed,
+ // BlockRead must return os.ErrNotExist.
+ BlockRead(ctx context.Context, hash string, writeTo io.WriterAt) error
- // Mtime returns the stored timestamp for the given locator.
+ // Store a block on the backend device, and set its timestamp
+ // to the current time.
//
- // loc is as described in Get.
- //
- // Mtime must return a non-nil error if the given block is not
- // found or the timestamp could not be retrieved.
- Mtime(loc string) (time.Time, error)
+ // The implementation must ensure that regardless of any
+ // errors encountered while writing, a partially written block
+ // is not left behind: a subsequent BlockRead call must return
+ // either a) the data previously stored under the given hash,
+ // if any, or b) os.ErrNotExist.
+ BlockWrite(ctx context.Context, hash string, data []byte) error
- // IndexTo writes a complete list of locators with the given
- // prefix for which Get() can retrieve data.
- //
- // prefix consists of zero or more lowercase hexadecimal
- // digits.
- //
- // Each locator must be written to the given writer using the
- // following format:
- //
- // loc "+" size " " timestamp "\n"
- //
- // where:
- //
- // - size is the number of bytes of content, given as a
- // decimal number with one or more digits
- //
- // - timestamp is the timestamp stored for the locator,
- // given as a decimal number of seconds after January 1,
- // 1970 UTC.
- //
- // IndexTo must not write any other data to writer: for
- // example, it must not write any blank lines.
- //
- // If an error makes it impossible to provide a complete
- // index, IndexTo must return a non-nil error. It is
- // acceptable to return a non-nil error after writing a
- // partial index to writer.
- //
- // The resulting index is not expected to be sorted in any
- // particular order.
- IndexTo(prefix string, writer io.Writer) error
-
- // Trash moves the block data from the underlying storage
- // device to trash area. The block then stays in trash for
- // BlobTrashLifetime before it is actually deleted.
- //
- // loc is as described in Get.
- //
- // If the timestamp for the given locator is newer than
- // BlobSigningTTL, Trash must not trash the data.
- //
- // If a Trash operation overlaps with any Touch or Put
- // operations on the same locator, the implementation must
- // ensure one of the following outcomes:
- //
- // - Touch and Put return a non-nil error, or
- // - Trash does not trash the block, or
- // - Both of the above.
- //
- // If it is possible for the storage device to be accessed by
- // a different process or host, the synchronization mechanism
- // should also guard against races with other processes and
- // hosts. If such a mechanism is not available, there must be
- // a mechanism for detecting unsafe configurations, alerting
- // the operator, and aborting or falling back to a read-only
- // state. In other words, running multiple keepstore processes
- // with the same underlying storage device must either work
- // reliably or fail outright.
- //
- // Corollary: A successful Touch or Put guarantees a block
- // will not be trashed for at least BlobSigningTTL seconds.
- Trash(loc string) error
+ // Update the indicated block's stored timestamp to the
+ // current time.
+ BlockTouch(hash string) error
- // Untrash moves block from trash back into store
- Untrash(loc string) error
+ // Return the indicated block's stored timestamp.
+ Mtime(hash string) (time.Time, error)
- // Status returns a *VolumeStatus representing the current
- // in-use and available storage capacity and an
- // implementation-specific volume identifier (e.g., "mount
- // point" for a UnixVolume).
- Status() *VolumeStatus
+ // Mark the indicated block as trash, such that -- unless it
+ // is untrashed before time.Now() + BlobTrashLifetime --
+ // BlockRead returns os.ErrNotExist and the block is not
+ // listed by Index.
+ BlockTrash(hash string) error
- // String returns an identifying label for this volume,
- // suitable for including in log messages. It should contain
- // enough information to uniquely identify the underlying
- // storage device, but should not contain any credentials or
- // secrets.
- String() string
+ // Un-mark the indicated block as trash. If the block has not
+ // been trashed, return os.ErrNotExist.
+ BlockUntrash(hash string) error
- // EmptyTrash looks for trashed blocks that exceeded
- // BlobTrashLifetime and deletes them from the volume.
+ // Permanently delete all blocks that have been marked as
+ // trash for BlobTrashLifetime or longer.
EmptyTrash()
- // Return a globally unique ID of the underlying storage
- // device if possible, otherwise "".
- GetDeviceID() string
-}
-
-// A VolumeWithExamples provides example configs to display in the
-// -help message.
-type VolumeWithExamples interface {
- Volume
- Examples() []Volume
-}
-
-// A VolumeManager tells callers which volumes can read, which volumes
-// can write, and on which volume the next write should be attempted.
-type VolumeManager interface {
- // Mounts returns all mounts (volume attachments).
- Mounts() []*VolumeMount
-
- // Lookup returns the mount with the given UUID. Returns nil
- // if the mount does not exist. If write==true, returns nil if
- // the mount is not writable.
- Lookup(uuid string, write bool) *VolumeMount
-
- // AllReadable returns all mounts.
- AllReadable() []*VolumeMount
-
- // AllWritable returns all mounts that aren't known to be in
- // a read-only state. (There is no guarantee that a write to
- // one will succeed, though.)
- AllWritable() []*VolumeMount
-
- // NextWritable returns the volume where the next new block
- // should be written. A VolumeManager can select a volume in
- // order to distribute activity across spindles, fill up disks
- // with more free space, etc.
- NextWritable() *VolumeMount
-
- // VolumeStats returns the ioStats used for tracking stats for
- // the given Volume.
- VolumeStats(Volume) *ioStats
-
- // Close shuts down the volume manager cleanly.
- Close()
-}
-
-// A VolumeMount is an attachment of a Volume to a VolumeManager.
-type VolumeMount struct {
- arvados.KeepMount
- Volume
-}
-
-// Generate a UUID the way API server would for a "KeepVolumeMount"
-// object.
-func (*VolumeMount) generateUUID() string {
- var max big.Int
- _, ok := max.SetString("zzzzzzzzzzzzzzz", 36)
- if !ok {
- panic("big.Int parse failed")
- }
- r, err := rand.Int(rand.Reader, &max)
- if err != nil {
- panic(err)
- }
- return fmt.Sprintf("zzzzz-ivpuk-%015s", r.Text(36))
-}
-
-// RRVolumeManager is a round-robin VolumeManager: the Nth call to
-// NextWritable returns the (N % len(writables))th writable Volume
-// (where writables are all Volumes v where v.Writable()==true).
-type RRVolumeManager struct {
- mounts []*VolumeMount
- mountMap map[string]*VolumeMount
- readables []*VolumeMount
- writables []*VolumeMount
- counter uint32
- iostats map[Volume]*ioStats
-}
-
-func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, myURL arvados.URL, metrics *volumeMetricsVecs) (*RRVolumeManager, error) {
- vm := &RRVolumeManager{
- iostats: make(map[Volume]*ioStats),
- }
- vm.mountMap = make(map[string]*VolumeMount)
- for uuid, cfgvol := range cluster.Volumes {
- va, ok := cfgvol.AccessViaHosts[myURL]
- if !ok && len(cfgvol.AccessViaHosts) > 0 {
- continue
- }
- dri, ok := driver[cfgvol.Driver]
- if !ok {
- return nil, fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
- }
- vol, err := dri(cluster, cfgvol, logger, metrics)
- if err != nil {
- return nil, fmt.Errorf("error initializing volume %s: %s", uuid, err)
- }
- logger.Printf("started volume %s (%s), ReadOnly=%v", uuid, vol, cfgvol.ReadOnly || va.ReadOnly)
-
- sc := cfgvol.StorageClasses
- if len(sc) == 0 {
- sc = map[string]bool{"default": true}
- }
- repl := cfgvol.Replication
- if repl < 1 {
- repl = 1
- }
- mnt := &VolumeMount{
- KeepMount: arvados.KeepMount{
- UUID: uuid,
- DeviceID: vol.GetDeviceID(),
- ReadOnly: cfgvol.ReadOnly || va.ReadOnly,
- Replication: repl,
- StorageClasses: sc,
- },
- Volume: vol,
- }
- vm.iostats[vol] = &ioStats{}
- vm.mounts = append(vm.mounts, mnt)
- vm.mountMap[uuid] = mnt
- vm.readables = append(vm.readables, mnt)
- if !mnt.KeepMount.ReadOnly {
- vm.writables = append(vm.writables, mnt)
- }
- }
- // pri(mnt): return highest priority of any storage class
- // offered by mnt
- pri := func(mnt *VolumeMount) int {
- any, best := false, 0
- for class := range mnt.KeepMount.StorageClasses {
- if p := cluster.StorageClasses[class].Priority; !any || best < p {
- best = p
- any = true
- }
- }
- return best
- }
- // less(a,b): sort first by highest priority of any offered
- // storage class (highest->lowest), then by volume UUID
- less := func(a, b *VolumeMount) bool {
- if pa, pb := pri(a), pri(b); pa != pb {
- return pa > pb
- } else {
- return a.KeepMount.UUID < b.KeepMount.UUID
- }
- }
- sort.Slice(vm.readables, func(i, j int) bool {
- return less(vm.readables[i], vm.readables[j])
- })
- sort.Slice(vm.writables, func(i, j int) bool {
- return less(vm.writables[i], vm.writables[j])
- })
- sort.Slice(vm.mounts, func(i, j int) bool {
- return less(vm.mounts[i], vm.mounts[j])
- })
- return vm, nil
-}
-
-func (vm *RRVolumeManager) Mounts() []*VolumeMount {
- return vm.mounts
-}
-
-func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
- if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
- return mnt
- }
- return nil
-}
-
-// AllReadable returns an array of all readable volumes
-func (vm *RRVolumeManager) AllReadable() []*VolumeMount {
- return vm.readables
-}
-
-// AllWritable returns writable volumes, sorted by priority/uuid. Used
-// by CompareAndTouch to ensure higher-priority volumes are checked
-// first.
-func (vm *RRVolumeManager) AllWritable() []*VolumeMount {
- return vm.writables
-}
-
-// NextWritable returns writable volumes, rotated by vm.counter so
-// each volume gets a turn to be first. Used by PutBlock to distribute
-// new data across available volumes.
-func (vm *RRVolumeManager) NextWritable() []*VolumeMount {
- if len(vm.writables) == 0 {
- return nil
- }
- offset := (int(atomic.AddUint32(&vm.counter, 1)) - 1) % len(vm.writables)
- return append(append([]*VolumeMount(nil), vm.writables[offset:]...), vm.writables[:offset]...)
-}
-
-// VolumeStats returns an ioStats for the given volume.
-func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
- return vm.iostats[v]
+ // Write an index of all non-trashed blocks available on the
+ // backend device whose hash begins with the given prefix
+ // (prefix is a string of zero or more hexadecimal digits).
+ //
+ // Each block is written as "{hash}+{size} {timestamp}\n"
+ // where timestamp is a decimal-formatted number of
+ // nanoseconds since the UTC Unix epoch.
+ //
+ // Index should abort and return ctx.Err() if ctx is cancelled
+ // before indexing is complete.
+ Index(ctx context.Context, prefix string, writeTo io.Writer) error
}
-// Close the RRVolumeManager
-func (vm *RRVolumeManager) Close() {
-}
+type volumeDriver func(newVolumeParams) (volume, error)
-// VolumeStatus describes the current condition of a volume
-type VolumeStatus struct {
- MountPoint string
- DeviceNum uint64
- BytesFree uint64
- BytesUsed uint64
+type newVolumeParams struct {
+ UUID string
+ Cluster *arvados.Cluster
+ ConfigVolume arvados.Volume
+ Logger logrus.FieldLogger
+ MetricsVecs *volumeMetricsVecs
+ BufferPool *bufferPool
}
// ioStats tracks I/O statistics for a volume or server
@@ -439,7 +106,3 @@ type ioStats struct {
InBytes uint64
OutBytes uint64
}
-
-type InternalStatser interface {
- InternalStats() interface{}
-}
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
index 0dd34e3af1..16084058b7 100644
--- a/services/keepstore/volume_generic_test.go
+++ b/services/keepstore/volume_generic_test.go
@@ -14,6 +14,7 @@ import (
"sort"
"strconv"
"strings"
+ "sync"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -39,7 +40,7 @@ type TB interface {
// A TestableVolumeFactory returns a new TestableVolume. The factory
// function, and the TestableVolume it returns, can use "t" to write
// logs, fail the current test, etc.
-type TestableVolumeFactory func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume
+type TestableVolumeFactory func(t TB, params newVolumeParams) TestableVolume
// DoGenericVolumeTests runs a set of tests that every TestableVolume
// is expected to pass. It calls factory to create a new TestableVolume
@@ -51,16 +52,6 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
s.testGet(t, factory)
s.testGetNoSuchBlock(t, factory)
- s.testCompareNonexistent(t, factory)
- s.testCompareSameContent(t, factory, TestHash, TestBlock)
- s.testCompareSameContent(t, factory, EmptyHash, EmptyBlock)
- s.testCompareWithCollision(t, factory, TestHash, TestBlock, []byte("baddata"))
- s.testCompareWithCollision(t, factory, TestHash, TestBlock, EmptyBlock)
- s.testCompareWithCollision(t, factory, EmptyHash, EmptyBlock, TestBlock)
- s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, []byte("baddata"))
- s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, EmptyBlock)
- s.testCompareWithCorruptStoredData(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
-
if !readonly {
s.testPutBlockWithSameContent(t, factory, TestHash, TestBlock)
s.testPutBlockWithSameContent(t, factory, EmptyHash, EmptyBlock)
@@ -76,7 +67,7 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
s.testMtimeNoSuchBlock(t, factory)
- s.testIndexTo(t, factory)
+ s.testIndex(t, factory)
if !readonly {
s.testDeleteNewBlock(t, factory)
@@ -84,33 +75,24 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
}
s.testDeleteNoSuchBlock(t, factory)
- s.testStatus(t, factory)
-
s.testMetrics(t, readonly, factory)
- s.testString(t, factory)
-
- if readonly {
- s.testUpdateReadOnly(t, factory)
- }
-
s.testGetConcurrent(t, factory)
if !readonly {
s.testPutConcurrent(t, factory)
-
s.testPutFullBlock(t, factory)
+ s.testTrashUntrash(t, readonly, factory)
+ s.testTrashEmptyTrashUntrash(t, factory)
}
-
- s.testTrashUntrash(t, readonly, factory)
- s.testTrashEmptyTrashUntrash(t, factory)
}
type genericVolumeSuite struct {
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
- registry *prometheus.Registry
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ registry *prometheus.Registry
+ bufferPool *bufferPool
}
func (s *genericVolumeSuite) setup(t TB) {
@@ -118,10 +100,18 @@ func (s *genericVolumeSuite) setup(t TB) {
s.logger = ctxlog.TestLogger(t)
s.registry = prometheus.NewRegistry()
s.metrics = newVolumeMetricsVecs(s.registry)
+ s.bufferPool = newBufferPool(s.logger, 8, s.registry)
}
func (s *genericVolumeSuite) newVolume(t TB, factory TestableVolumeFactory) TestableVolume {
- return factory(t, s.cluster, s.volume, s.logger, s.metrics)
+ return factory(t, newVolumeParams{
+ UUID: "zzzzz-nyw5e-999999999999999",
+ Cluster: s.cluster,
+ ConfigVolume: s.volume,
+ Logger: s.logger,
+ MetricsVecs: s.metrics,
+ BufferPool: s.bufferPool,
+ })
}
// Put a test block, get it and verify content
@@ -131,95 +121,30 @@ func (s *genericVolumeSuite) testGet(t TB, factory TestableVolumeFactory) {
v := s.newVolume(t, factory)
defer v.Teardown()
- v.PutRaw(TestHash, TestBlock)
-
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
+ err := v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
- t.Fatal(err)
- }
-
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Errorf("expected %s, got %s", string(TestBlock), string(buf))
- }
-}
-
-// Invoke get on a block that does not exist in volume; should result in error
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testGetNoSuchBlock(t TB, factory TestableVolumeFactory) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- buf := make([]byte, BlockSize)
- if _, err := v.Get(context.Background(), TestHash2, buf); err == nil {
- t.Errorf("Expected error while getting non-existing block %v", TestHash2)
- }
-}
-
-// Compare() should return os.ErrNotExist if the block does not exist.
-// Otherwise, writing new data causes CompareAndTouch() to generate
-// error logs even though everything is working fine.
-func (s *genericVolumeSuite) testCompareNonexistent(t TB, factory TestableVolumeFactory) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- err := v.Compare(context.Background(), TestHash, TestBlock)
- if err != os.ErrNotExist {
- t.Errorf("Got err %T %q, expected os.ErrNotExist", err, err)
+ t.Error(err)
}
-}
-// Put a test block and compare the locator with same content
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareSameContent(t TB, factory TestableVolumeFactory, testHash string, testData []byte) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- v.PutRaw(testHash, testData)
-
- // Compare the block locator with same content
- err := v.Compare(context.Background(), testHash, testData)
+ buf := &brbuffer{}
+ err = v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
- t.Errorf("Got err %q, expected nil", err)
+ t.Error(err)
}
-}
-
-// Test behavior of Compare() when stored data matches expected
-// checksum but differs from new data we need to store. Requires
-// testHash = md5(testDataA).
-//
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCollision(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- v.PutRaw(testHash, testDataA)
-
- // Compare the block locator with different content; collision
- err := v.Compare(context.Background(), TestHash, testDataB)
- if err == nil {
- t.Errorf("Got err nil, expected error due to collision")
+ if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
+ t.Errorf("expected %s, got %s", string(TestBlock), buf.String())
}
}
-// Test behavior of Compare() when stored data has become
-// corrupted. Requires testHash = md5(testDataA) != md5(testDataB).
-//
+// Invoke get on a block that does not exist in volume; should result in error
// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCorruptStoredData(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
+func (s *genericVolumeSuite) testGetNoSuchBlock(t TB, factory TestableVolumeFactory) {
s.setup(t)
v := s.newVolume(t, factory)
defer v.Teardown()
- v.PutRaw(TestHash, testDataB)
-
- err := v.Compare(context.Background(), testHash, testDataA)
- if err == nil || err == CollisionError {
- t.Errorf("Got err %+v, expected non-collision error", err)
+ if err := v.BlockRead(context.Background(), barHash, brdiscard); err == nil {
+ t.Errorf("Expected error while getting non-existing block %v", barHash)
}
}
@@ -230,12 +155,12 @@ func (s *genericVolumeSuite) testPutBlockWithSameContent(t TB, factory TestableV
v := s.newVolume(t, factory)
defer v.Teardown()
- err := v.Put(context.Background(), testHash, testData)
+ err := v.BlockWrite(context.Background(), testHash, testData)
if err != nil {
t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
}
- err = v.Put(context.Background(), testHash, testData)
+ err = v.BlockWrite(context.Background(), testHash, testData)
if err != nil {
t.Errorf("Got err putting block second time %q: %q, expected nil", TestBlock, err)
}
@@ -248,23 +173,23 @@ func (s *genericVolumeSuite) testPutBlockWithDifferentContent(t TB, factory Test
v := s.newVolume(t, factory)
defer v.Teardown()
- v.PutRaw(testHash, testDataA)
+ v.BlockWrite(context.Background(), testHash, testDataA)
- putErr := v.Put(context.Background(), testHash, testDataB)
- buf := make([]byte, BlockSize)
- n, getErr := v.Get(context.Background(), testHash, buf)
+ putErr := v.BlockWrite(context.Background(), testHash, testDataB)
+ buf := &brbuffer{}
+ getErr := v.BlockRead(context.Background(), testHash, buf)
if putErr == nil {
// Put must not return a nil error unless it has
// overwritten the existing data.
- if bytes.Compare(buf[:n], testDataB) != 0 {
- t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf[:n], testDataB)
+ if buf.String() != string(testDataB) {
+ t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf, testDataB)
}
} else {
// It is permissible for Put to fail, but it must
// leave us with either the original data, the new
// data, or nothing at all.
- if getErr == nil && bytes.Compare(buf[:n], testDataA) != 0 && bytes.Compare(buf[:n], testDataB) != 0 {
- t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf[:n], testDataA, testDataB)
+ if getErr == nil && buf.String() != string(testDataA) && buf.String() != string(testDataB) {
+ t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf, testDataA, testDataB)
}
}
}
@@ -276,66 +201,67 @@ func (s *genericVolumeSuite) testPutMultipleBlocks(t TB, factory TestableVolumeF
v := s.newVolume(t, factory)
defer v.Teardown()
- err := v.Put(context.Background(), TestHash, TestBlock)
+ err := v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
}
- err = v.Put(context.Background(), TestHash2, TestBlock2)
+ err = v.BlockWrite(context.Background(), TestHash2, TestBlock2)
if err != nil {
t.Errorf("Got err putting block %q: %q, expected nil", TestBlock2, err)
}
- err = v.Put(context.Background(), TestHash3, TestBlock3)
+ err = v.BlockWrite(context.Background(), TestHash3, TestBlock3)
if err != nil {
t.Errorf("Got err putting block %q: %q, expected nil", TestBlock3, err)
}
- data := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, data)
+ buf := &brbuffer{}
+ err = v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
t.Error(err)
} else {
- if bytes.Compare(data[:n], TestBlock) != 0 {
- t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock)
+ if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
+ t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock)
}
}
- n, err = v.Get(context.Background(), TestHash2, data)
+ buf.Reset()
+ err = v.BlockRead(context.Background(), TestHash2, buf)
if err != nil {
t.Error(err)
} else {
- if bytes.Compare(data[:n], TestBlock2) != 0 {
- t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock2)
+ if bytes.Compare(buf.Bytes(), TestBlock2) != 0 {
+ t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock2)
}
}
- n, err = v.Get(context.Background(), TestHash3, data)
+ buf.Reset()
+ err = v.BlockRead(context.Background(), TestHash3, buf)
if err != nil {
t.Error(err)
} else {
- if bytes.Compare(data[:n], TestBlock3) != 0 {
- t.Errorf("Block present, but to %+q, expected %+q", data[:n], TestBlock3)
+ if bytes.Compare(buf.Bytes(), TestBlock3) != 0 {
+ t.Errorf("Block present, but to %+q, expected %+q", buf, TestBlock3)
}
}
}
-// testPutAndTouch
-// Test that when applying PUT to a block that already exists,
-// the block's modification time is updated.
-// Test is intended for only writable volumes
+// testPutAndTouch checks that when applying PUT to a block that
+// already exists, the block's modification time is updated. Intended
+// for only writable volumes.
func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory) {
s.setup(t)
v := s.newVolume(t, factory)
defer v.Teardown()
- if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+ if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
t.Error(err)
}
// We'll verify { t0 < threshold < t1 }, where t0 is the
- // existing block's timestamp on disk before Put() and t1 is
- // its timestamp after Put().
+ // existing block's timestamp on disk before BlockWrite() and t1 is
+ // its timestamp after BlockWrite().
threshold := time.Now().Add(-time.Second)
// Set the stored block's mtime far enough in the past that we
@@ -349,7 +275,7 @@ func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory
}
// Write the same block again.
- if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+ if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
t.Error(err)
}
@@ -368,7 +294,7 @@ func (s *genericVolumeSuite) testTouchNoSuchBlock(t TB, factory TestableVolumeFa
v := s.newVolume(t, factory)
defer v.Teardown()
- if err := v.Touch(TestHash); err == nil {
+ if err := v.BlockTouch(TestHash); err == nil {
t.Error("Expected error when attempted to touch a non-existing block")
}
}
@@ -385,12 +311,12 @@ func (s *genericVolumeSuite) testMtimeNoSuchBlock(t TB, factory TestableVolumeFa
}
}
-// Put a few blocks and invoke IndexTo with:
+// Put a few blocks and invoke Index with:
// * no prefix
// * with a prefix
// * with no such prefix
// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
+func (s *genericVolumeSuite) testIndex(t TB, factory TestableVolumeFactory) {
s.setup(t)
v := s.newVolume(t, factory)
defer v.Teardown()
@@ -401,9 +327,9 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
minMtime := time.Now().UTC().UnixNano()
minMtime -= minMtime % 1e9
- v.PutRaw(TestHash, TestBlock)
- v.PutRaw(TestHash2, TestBlock2)
- v.PutRaw(TestHash3, TestBlock3)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+ v.BlockWrite(context.Background(), TestHash3, TestBlock3)
maxMtime := time.Now().UTC().UnixNano()
if maxMtime%1e9 > 0 {
@@ -413,13 +339,13 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
// Blocks whose names aren't Keep hashes should be omitted from
// index
- v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
- v.PutRaw("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
- v.PutRaw("f0000000000000000000000000000000f", nil)
- v.PutRaw("f00", nil)
+ v.BlockWrite(context.Background(), "fffffffffnotreallyahashfffffffff", nil)
+ v.BlockWrite(context.Background(), "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
+ v.BlockWrite(context.Background(), "f0000000000000000000000000000000f", nil)
+ v.BlockWrite(context.Background(), "f00", nil)
buf := new(bytes.Buffer)
- v.IndexTo("", buf)
+ v.Index(context.Background(), "", buf)
indexRows := strings.Split(string(buf.Bytes()), "\n")
sort.Strings(indexRows)
sortedIndex := strings.Join(indexRows, "\n")
@@ -442,7 +368,7 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
for _, prefix := range []string{"f", "f15", "f15ac"} {
buf = new(bytes.Buffer)
- v.IndexTo(prefix, buf)
+ v.Index(context.Background(), prefix, buf)
m, err := regexp.MatchString(`^`+TestHash2+`\+\d+ \d+\n$`, string(buf.Bytes()))
if err != nil {
@@ -454,11 +380,11 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
for _, prefix := range []string{"zero", "zip", "zilch"} {
buf = new(bytes.Buffer)
- err := v.IndexTo(prefix, buf)
+ err := v.Index(context.Background(), prefix, buf)
if err != nil {
- t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
+ t.Errorf("Got error on Index with no such prefix %v", err.Error())
} else if buf.Len() != 0 {
- t.Errorf("Expected empty list for IndexTo with no such prefix %s", prefix)
+ t.Errorf("Expected empty list for Index with no such prefix %s", prefix)
}
}
}
@@ -472,17 +398,17 @@ func (s *genericVolumeSuite) testDeleteNewBlock(t TB, factory TestableVolumeFact
v := s.newVolume(t, factory)
defer v.Teardown()
- v.Put(context.Background(), TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
- if err := v.Trash(TestHash); err != nil {
+ if err := v.BlockTrash(TestHash); err != nil {
t.Error(err)
}
- data := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, data)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
t.Error(err)
- } else if bytes.Compare(data[:n], TestBlock) != 0 {
- t.Errorf("Got data %+q, expected %+q", data[:n], TestBlock)
+ } else if buf.String() != string(TestBlock) {
+ t.Errorf("Got data %+q, expected %+q", buf.String(), TestBlock)
}
}
@@ -495,36 +421,30 @@ func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFact
v := s.newVolume(t, factory)
defer v.Teardown()
- v.Put(context.Background(), TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
- if err := v.Trash(TestHash); err != nil {
+ if err := v.BlockTrash(TestHash); err != nil {
t.Error(err)
}
- data := make([]byte, BlockSize)
- if _, err := v.Get(context.Background(), TestHash, data); err == nil || !os.IsNotExist(err) {
+ if err := v.BlockRead(context.Background(), TestHash, brdiscard); err == nil || !os.IsNotExist(err) {
t.Errorf("os.IsNotExist(%v) should have been true", err)
}
_, err := v.Mtime(TestHash)
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
- }
-
- err = v.Compare(context.Background(), TestHash, TestBlock)
- if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
indexBuf := new(bytes.Buffer)
- v.IndexTo("", indexBuf)
+ v.Index(context.Background(), "", indexBuf)
if strings.Contains(string(indexBuf.Bytes()), TestHash) {
- t.Fatalf("Found trashed block in IndexTo")
+ t.Errorf("Found trashed block in Index")
}
- err = v.Touch(TestHash)
+ err = v.BlockTouch(TestHash)
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
}
@@ -535,33 +455,11 @@ func (s *genericVolumeSuite) testDeleteNoSuchBlock(t TB, factory TestableVolumeF
v := s.newVolume(t, factory)
defer v.Teardown()
- if err := v.Trash(TestHash2); err == nil {
+ if err := v.BlockTrash(TestHash2); err == nil {
t.Errorf("Expected error when attempting to delete a non-existing block")
}
}
-// Invoke Status and verify that VolumeStatus is returned
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testStatus(t TB, factory TestableVolumeFactory) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- // Get node status and make a basic sanity check.
- status := v.Status()
- if status.DeviceNum == 0 {
- t.Errorf("uninitialized device_num in %v", status)
- }
-
- if status.BytesFree == 0 {
- t.Errorf("uninitialized bytes_free in %v", status)
- }
-
- if status.BytesUsed == 0 {
- t.Errorf("uninitialized bytes_used in %v", status)
- }
-}
-
func getValueFrom(cv *prometheus.CounterVec, lbls prometheus.Labels) float64 {
c, _ := cv.GetMetricWith(lbls)
pb := &dto.Metric{}
@@ -576,7 +474,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
v := s.newVolume(t, factory)
defer v.Teardown()
- opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.GetDeviceID()})
+ opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.DeviceID()})
if ioC == nil {
t.Error("ioBytes CounterVec is nil")
@@ -601,7 +499,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
// Test Put if volume is writable
if !readonly {
- err = v.Put(context.Background(), TestHash, TestBlock)
+ err = v.BlockWrite(context.Background(), TestHash, TestBlock)
if err != nil {
t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
}
@@ -615,13 +513,12 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
t.Error("ioBytes{direction=out} counter shouldn't be zero")
}
} else {
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
}
- buf := make([]byte, BlockSize)
- _, err = v.Get(context.Background(), TestHash, buf)
+ err = v.BlockRead(context.Background(), TestHash, brdiscard)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Check that the operations counter increased
@@ -635,63 +532,6 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
}
}
-// Invoke String for the volume; expect non-empty result
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testString(t TB, factory TestableVolumeFactory) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- if id := v.String(); len(id) == 0 {
- t.Error("Got empty string for v.String()")
- }
-}
-
-// Putting, updating, touching, and deleting blocks from a read-only volume result in error.
-// Test is intended for only read-only volumes
-func (s *genericVolumeSuite) testUpdateReadOnly(t TB, factory TestableVolumeFactory) {
- s.setup(t)
- v := s.newVolume(t, factory)
- defer v.Teardown()
-
- v.PutRaw(TestHash, TestBlock)
- buf := make([]byte, BlockSize)
-
- // Get from read-only volume should succeed
- _, err := v.Get(context.Background(), TestHash, buf)
- if err != nil {
- t.Errorf("got err %v, expected nil", err)
- }
-
- // Put a new block to read-only volume should result in error
- err = v.Put(context.Background(), TestHash2, TestBlock2)
- if err == nil {
- t.Errorf("Expected error when putting block in a read-only volume")
- }
- _, err = v.Get(context.Background(), TestHash2, buf)
- if err == nil {
- t.Errorf("Expected error when getting block whose put in read-only volume failed")
- }
-
- // Touch a block in read-only volume should result in error
- err = v.Touch(TestHash)
- if err == nil {
- t.Errorf("Expected error when touching block in a read-only volume")
- }
-
- // Delete a block from a read-only volume should result in error
- err = v.Trash(TestHash)
- if err == nil {
- t.Errorf("Expected error when deleting block from a read-only volume")
- }
-
- // Overwriting an existing block in read-only volume should result in error
- err = v.Put(context.Background(), TestHash, TestBlock)
- if err == nil {
- t.Errorf("Expected error when putting block in a read-only volume")
- }
-}
-
// Launch concurrent Gets
// Test should pass for both writable and read-only volumes
func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFactory) {
@@ -699,43 +539,43 @@ func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFacto
v := s.newVolume(t, factory)
defer v.Teardown()
- v.PutRaw(TestHash, TestBlock)
- v.PutRaw(TestHash2, TestBlock2)
- v.PutRaw(TestHash3, TestBlock3)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+ v.BlockWrite(context.Background(), TestHash3, TestBlock3)
sem := make(chan int)
go func() {
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
t.Errorf("err1: %v", err)
}
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Errorf("buf should be %s, is %s", string(TestBlock), string(buf[:n]))
+ if buf.String() != string(TestBlock) {
+ t.Errorf("buf should be %s, is %s", TestBlock, buf)
}
sem <- 1
}()
go func() {
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash2, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash2, buf)
if err != nil {
t.Errorf("err2: %v", err)
}
- if bytes.Compare(buf[:n], TestBlock2) != 0 {
- t.Errorf("buf should be %s, is %s", string(TestBlock2), string(buf[:n]))
+ if buf.String() != string(TestBlock2) {
+ t.Errorf("buf should be %s, is %s", TestBlock2, buf)
}
sem <- 1
}()
go func() {
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash3, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash3, buf)
if err != nil {
t.Errorf("err3: %v", err)
}
- if bytes.Compare(buf[:n], TestBlock3) != 0 {
- t.Errorf("buf should be %s, is %s", string(TestBlock3), string(buf[:n]))
+ if buf.String() != string(TestBlock3) {
+ t.Errorf("buf should be %s, is %s", TestBlock3, buf)
}
sem <- 1
}()
@@ -753,60 +593,38 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
v := s.newVolume(t, factory)
defer v.Teardown()
- sem := make(chan int)
- go func(sem chan int) {
- err := v.Put(context.Background(), TestHash, TestBlock)
- if err != nil {
- t.Errorf("err1: %v", err)
- }
- sem <- 1
- }(sem)
-
- go func(sem chan int) {
- err := v.Put(context.Background(), TestHash2, TestBlock2)
+ blks := []struct {
+ hash string
+ data []byte
+ }{
+ {hash: TestHash, data: TestBlock},
+ {hash: TestHash2, data: TestBlock2},
+ {hash: TestHash3, data: TestBlock3},
+ }
+
+ var wg sync.WaitGroup
+ for _, blk := range blks {
+ blk := blk
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := v.BlockWrite(context.Background(), blk.hash, blk.data)
+ if err != nil {
+ t.Errorf("%s: %v", blk.hash, err)
+ }
+ }()
+ }
+ wg.Wait()
+
+ // Check that we actually wrote the blocks.
+ for _, blk := range blks {
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), blk.hash, buf)
if err != nil {
- t.Errorf("err2: %v", err)
+ t.Errorf("get %s: %v", blk.hash, err)
+ } else if buf.String() != string(blk.data) {
+ t.Errorf("get %s: expected %s, got %s", blk.hash, blk.data, buf)
}
- sem <- 1
- }(sem)
-
- go func(sem chan int) {
- err := v.Put(context.Background(), TestHash3, TestBlock3)
- if err != nil {
- t.Errorf("err3: %v", err)
- }
- sem <- 1
- }(sem)
-
- // Wait for all goroutines to finish
- for done := 0; done < 3; done++ {
- <-sem
- }
-
- // Double check that we actually wrote the blocks we expected to write.
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
- if err != nil {
- t.Errorf("Get #1: %v", err)
- }
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Errorf("Get #1: expected %s, got %s", string(TestBlock), string(buf[:n]))
- }
-
- n, err = v.Get(context.Background(), TestHash2, buf)
- if err != nil {
- t.Errorf("Get #2: %v", err)
- }
- if bytes.Compare(buf[:n], TestBlock2) != 0 {
- t.Errorf("Get #2: expected %s, got %s", string(TestBlock2), string(buf[:n]))
- }
-
- n, err = v.Get(context.Background(), TestHash3, buf)
- if err != nil {
- t.Errorf("Get #3: %v", err)
- }
- if bytes.Compare(buf[:n], TestBlock3) != 0 {
- t.Errorf("Get #3: expected %s, got %s", string(TestBlock3), string(buf[:n]))
}
}
@@ -820,17 +638,18 @@ func (s *genericVolumeSuite) testPutFullBlock(t TB, factory TestableVolumeFactor
wdata[0] = 'a'
wdata[BlockSize-1] = 'z'
hash := fmt.Sprintf("%x", md5.Sum(wdata))
- err := v.Put(context.Background(), hash, wdata)
+ err := v.BlockWrite(context.Background(), hash, wdata)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), hash, buf)
+
+ buf := &brbuffer{}
+ err = v.BlockRead(context.Background(), hash, buf)
if err != nil {
t.Error(err)
}
- if bytes.Compare(buf[:n], wdata) != 0 {
- t.Error("buf %+q != wdata %+q", buf[:n], wdata)
+ if buf.String() != string(wdata) {
+ t.Errorf("buf (len %d) != wdata (len %d)", buf.Len(), len(wdata))
}
}
@@ -845,48 +664,44 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
defer v.Teardown()
// put block and backdate it
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+ if buf.String() != string(TestBlock) {
+ t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
}
// Trash
- err = v.Trash(TestHash)
- if readonly {
- if err != MethodDisabledError {
- t.Fatal(err)
- }
- } else if err != nil {
- if err != ErrNotImplemented {
- t.Fatal(err)
- }
- } else {
- _, err = v.Get(context.Background(), TestHash, buf)
- if err == nil || !os.IsNotExist(err) {
- t.Errorf("os.IsNotExist(%v) should have been true", err)
- }
+ err = v.BlockTrash(TestHash)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ buf.Reset()
+ err = v.BlockRead(context.Background(), TestHash, buf)
+ if err == nil || !os.IsNotExist(err) {
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
+ }
- // Untrash
- err = v.Untrash(TestHash)
- if err != nil {
- t.Fatal(err)
- }
+ // Untrash
+ err = v.BlockUntrash(TestHash)
+ if err != nil {
+ t.Error(err)
}
// Get the block - after trash and untrash sequence
- n, err = v.Get(context.Background(), TestHash, buf)
+ buf.Reset()
+ err = v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+ if buf.String() != string(TestBlock) {
+ t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
}
}
@@ -896,13 +711,13 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
defer v.Teardown()
checkGet := func() error {
- buf := make([]byte, BlockSize)
- n, err := v.Get(context.Background(), TestHash, buf)
+ buf := &brbuffer{}
+ err := v.BlockRead(context.Background(), TestHash, buf)
if err != nil {
return err
}
- if bytes.Compare(buf[:n], TestBlock) != 0 {
- t.Fatalf("Got data %+q, expected %+q", buf[:n], TestBlock)
+ if buf.String() != string(TestBlock) {
+ t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
}
_, err = v.Mtime(TestHash)
@@ -910,13 +725,8 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
return err
}
- err = v.Compare(context.Background(), TestHash, TestBlock)
- if err != nil {
- return err
- }
-
indexBuf := new(bytes.Buffer)
- v.IndexTo("", indexBuf)
+ v.Index(context.Background(), "", indexBuf)
if !strings.Contains(string(indexBuf.Bytes()), TestHash) {
return os.ErrNotExist
}
@@ -928,50 +738,47 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
s.cluster.Collections.BlobTrashLifetime.Set("1h")
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
err := checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Trash the block
- err = v.Trash(TestHash)
- if err == MethodDisabledError || err == ErrNotImplemented {
- // Skip the trash tests for read-only volumes, and
- // volume types that don't support
- // BlobTrashLifetime>0.
- return
+ err = v.BlockTrash(TestHash)
+ if err != nil {
+ t.Error(err)
}
err = checkGet()
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
- err = v.Touch(TestHash)
+ err = v.BlockTouch(TestHash)
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
v.EmptyTrash()
// Even after emptying the trash, we can untrash our block
// because the deadline hasn't been reached.
- err = v.Untrash(TestHash)
+ err = v.BlockUntrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
err = checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
- err = v.Touch(TestHash)
+ err = v.BlockTouch(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Because we Touch'ed, need to backdate again for next set of tests
@@ -980,16 +787,16 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
// If the only block in the trash has already been untrashed,
// most volumes will fail a subsequent Untrash with a 404, but
// it's also acceptable for Untrash to succeed.
- err = v.Untrash(TestHash)
+ err = v.BlockUntrash(TestHash)
if err != nil && !os.IsNotExist(err) {
- t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
+ t.Errorf("Expected success or os.IsNotExist(), but got: %v", err)
}
// The additional Untrash should not interfere with our
// already-untrashed copy.
err = checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Untrash might have updated the timestamp, so backdate again
@@ -999,74 +806,74 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
s.cluster.Collections.BlobTrashLifetime.Set("1ns")
- err = v.Trash(TestHash)
+ err = v.BlockTrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
err = checkGet()
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
// Even though 1ns has passed, we can untrash because we
// haven't called EmptyTrash yet.
- err = v.Untrash(TestHash)
+ err = v.BlockUntrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
err = checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Trash it again, and this time call EmptyTrash so it really
// goes away.
// (In Azure volumes, un/trash changes Mtime, so first backdate again)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
- _ = v.Trash(TestHash)
+ _ = v.BlockTrash(TestHash)
err = checkGet()
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
v.EmptyTrash()
// Untrash won't find it
- err = v.Untrash(TestHash)
+ err = v.BlockUntrash(TestHash)
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
// Get block won't find it
err = checkGet()
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
// Third set: If the same data block gets written again after
// being trashed, and then the trash gets emptied, the newer
// un-trashed copy doesn't get deleted along with it.
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
s.cluster.Collections.BlobTrashLifetime.Set("1ns")
- err = v.Trash(TestHash)
+ err = v.BlockTrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
err = checkGet()
if err == nil || !os.IsNotExist(err) {
- t.Fatalf("os.IsNotExist(%v) should have been true", err)
+ t.Errorf("os.IsNotExist(%v) should have been true", err)
}
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
// EmptyTrash should not delete the untrashed copy.
v.EmptyTrash()
err = checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// Fourth set: If the same data block gets trashed twice with
@@ -1074,33 +881,33 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
// at intermediate time B (A < B < C), it is still possible to
// untrash the block whose deadline is "C".
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
s.cluster.Collections.BlobTrashLifetime.Set("1ns")
- err = v.Trash(TestHash)
+ err = v.BlockTrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
- v.PutRaw(TestHash, TestBlock)
+ v.BlockWrite(context.Background(), TestHash, TestBlock)
v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
s.cluster.Collections.BlobTrashLifetime.Set("1h")
- err = v.Trash(TestHash)
+ err = v.BlockTrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
// EmptyTrash should not prevent us from recovering the
// time.Hour ("C") trash
v.EmptyTrash()
- err = v.Untrash(TestHash)
+ err = v.BlockUntrash(TestHash)
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
err = checkGet()
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
}
diff --git a/services/keepstore/volume_test.go b/services/keepstore/volume_test.go
index 950b3989aa..f64041b048 100644
--- a/services/keepstore/volume_test.go
+++ b/services/keepstore/volume_test.go
@@ -5,25 +5,13 @@
package keepstore
import (
- "bytes"
- "context"
- "crypto/md5"
- "errors"
- "fmt"
- "io"
- "os"
- "strings"
"sync"
"time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/sirupsen/logrus"
)
var (
- TestBlock = []byte("The quick brown fox jumps over the lazy dog.")
- TestHash = "e4d909c290d0fb1ca068ffaddf22cbd0"
- TestHashPutResp = "e4d909c290d0fb1ca068ffaddf22cbd0+44\n"
+ TestBlock = []byte("The quick brown fox jumps over the lazy dog.")
+ TestHash = "e4d909c290d0fb1ca068ffaddf22cbd0"
TestBlock2 = []byte("Pack my box with five dozen liquor jugs.")
TestHash2 = "f15ac516f788aec4f30932ffb6395c39"
@@ -31,10 +19,6 @@ var (
TestBlock3 = []byte("Now is the time for all good men to come to the aid of their country.")
TestHash3 = "eed29bbffbc2dbe5e5ee0bb71888e61f"
- // BadBlock is used to test collisions and corruption.
- // It must not match any test hashes.
- BadBlock = []byte("The magic words are squeamish ossifrage.")
-
EmptyHash = "d41d8cd98f00b204e9800998ecf8427e"
EmptyBlock = []byte("")
)
@@ -43,230 +27,64 @@ var (
// underlying Volume, in order to test behavior in cases that are
// impractical to achieve with a sequence of normal Volume operations.
type TestableVolume interface {
- Volume
-
- // [Over]write content for a locator with the given data,
- // bypassing all constraints like readonly and serialize.
- PutRaw(locator string, data []byte)
+ volume
// Returns the strings that a driver uses to record read/write operations.
ReadWriteOperationLabelValues() (r, w string)
// Specify the value Mtime() should return, until the next
- // call to Touch, TouchWithDate, or Put.
- TouchWithDate(locator string, lastPut time.Time)
+	// call to BlockTouch, TouchWithDate, or BlockWrite.
+ TouchWithDate(locator string, lastBlockWrite time.Time)
// Clean up, delete temporary files.
Teardown()
}
-func init() {
- driver["mock"] = newMockVolume
-}
-
-// MockVolumes are test doubles for Volumes, used to test handlers.
-type MockVolume struct {
- Store map[string][]byte
- Timestamps map[string]time.Time
-
- // Bad volumes return an error for every operation.
- Bad bool
- BadVolumeError error
-
- // Touchable volumes' Touch() method succeeds for a locator
- // that has been Put().
- Touchable bool
-
- // Gate is a "starting gate", allowing test cases to pause
- // volume operations long enough to inspect state. Every
- // operation (except Status) starts by receiving from
- // Gate. Sending one value unblocks one operation; closing the
- // channel unblocks all operations. By default, Gate is a
- // closed channel, so all operations proceed without
- // blocking. See trash_worker_test.go for an example.
- Gate chan struct{} `json:"-"`
-
- cluster *arvados.Cluster
- volume arvados.Volume
- logger logrus.FieldLogger
- metrics *volumeMetricsVecs
- called map[string]int
- mutex sync.Mutex
-}
-
-// newMockVolume returns a non-Bad, non-Readonly, Touchable mock
-// volume.
-func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- gate := make(chan struct{})
- close(gate)
- return &MockVolume{
- Store: make(map[string][]byte),
- Timestamps: make(map[string]time.Time),
- Bad: false,
- Touchable: true,
- called: map[string]int{},
- Gate: gate,
- cluster: cluster,
- volume: volume,
- logger: logger,
- metrics: metrics,
- }, nil
+// brbuffer is like bytes.Buffer, but it implements io.WriterAt.
+// Convenient for testing (volume)BlockRead implementations.
+type brbuffer struct {
+ mtx sync.Mutex
+ buf []byte
}
-// CallCount returns how many times the named method has been called.
-func (v *MockVolume) CallCount(method string) int {
- v.mutex.Lock()
- defer v.mutex.Unlock()
- c, ok := v.called[method]
- if !ok {
- return 0
+func (b *brbuffer) WriteAt(p []byte, offset int64) (int, error) {
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+ if short := int(offset) + len(p) - len(b.buf); short > 0 {
+ b.buf = append(b.buf, make([]byte, short)...)
}
- return c
+ return copy(b.buf[offset:], p), nil
}
-func (v *MockVolume) gotCall(method string) {
- v.mutex.Lock()
- defer v.mutex.Unlock()
- if _, ok := v.called[method]; !ok {
- v.called[method] = 1
- } else {
- v.called[method]++
- }
+func (b *brbuffer) Bytes() []byte {
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+ return b.buf
}
-func (v *MockVolume) Compare(ctx context.Context, loc string, buf []byte) error {
- v.gotCall("Compare")
- <-v.Gate
- if v.Bad {
- return v.BadVolumeError
- } else if block, ok := v.Store[loc]; ok {
- if fmt.Sprintf("%x", md5.Sum(block)) != loc {
- return DiskHashError
- }
- if bytes.Compare(buf, block) != 0 {
- return CollisionError
- }
- return nil
- } else {
- return os.ErrNotExist
- }
+func (b *brbuffer) String() string {
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+ return string(b.buf)
}
-func (v *MockVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- v.gotCall("Get")
- <-v.Gate
- if v.Bad {
- return 0, v.BadVolumeError
- } else if block, ok := v.Store[loc]; ok {
- copy(buf[:len(block)], block)
- return len(block), nil
- }
- return 0, os.ErrNotExist
+func (b *brbuffer) Len() int {
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+ return len(b.buf)
}
-func (v *MockVolume) Put(ctx context.Context, loc string, block []byte) error {
- v.gotCall("Put")
- <-v.Gate
- if v.Bad {
- return v.BadVolumeError
- }
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- v.Store[loc] = block
- return v.Touch(loc)
+func (b *brbuffer) Reset() {
+ b.mtx.Lock()
+ defer b.mtx.Unlock()
+ b.buf = nil
}
-func (v *MockVolume) Touch(loc string) error {
- return v.TouchWithDate(loc, time.Now())
-}
+// a brdiscarder is like io.Discard, but it implements
+// io.WriterAt. Convenient for testing (volume)BlockRead
+// implementations when the output is not checked.
+type brdiscarder struct{}
-func (v *MockVolume) TouchWithDate(loc string, t time.Time) error {
- v.gotCall("Touch")
- <-v.Gate
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- if _, exists := v.Store[loc]; !exists {
- return os.ErrNotExist
- }
- if v.Touchable {
- v.Timestamps[loc] = t
- return nil
- }
- return errors.New("Touch failed")
-}
+func (brdiscarder) WriteAt(p []byte, offset int64) (int, error) { return len(p), nil }
-func (v *MockVolume) Mtime(loc string) (time.Time, error) {
- v.gotCall("Mtime")
- <-v.Gate
- var mtime time.Time
- var err error
- if v.Bad {
- err = v.BadVolumeError
- } else if t, ok := v.Timestamps[loc]; ok {
- mtime = t
- } else {
- err = os.ErrNotExist
- }
- return mtime, err
-}
-
-func (v *MockVolume) IndexTo(prefix string, w io.Writer) error {
- v.gotCall("IndexTo")
- <-v.Gate
- for loc, block := range v.Store {
- if !IsValidLocator(loc) || !strings.HasPrefix(loc, prefix) {
- continue
- }
- _, err := fmt.Fprintf(w, "%s+%d %d\n",
- loc, len(block), 123456789)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (v *MockVolume) Trash(loc string) error {
- v.gotCall("Delete")
- <-v.Gate
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
- if _, ok := v.Store[loc]; ok {
- if time.Since(v.Timestamps[loc]) < time.Duration(v.cluster.Collections.BlobSigningTTL) {
- return nil
- }
- delete(v.Store, loc)
- return nil
- }
- return os.ErrNotExist
-}
-
-func (v *MockVolume) GetDeviceID() string {
- return "mock-device-id"
-}
-
-func (v *MockVolume) Untrash(loc string) error {
- return nil
-}
-
-func (v *MockVolume) Status() *VolumeStatus {
- var used uint64
- for _, block := range v.Store {
- used = used + uint64(len(block))
- }
- return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
-}
-
-func (v *MockVolume) String() string {
- return "[MockVolume]"
-}
-
-func (v *MockVolume) EmptyTrash() {
-}
-
-func (v *MockVolume) GetStorageClasses() []string {
- return nil
-}
+var brdiscard = brdiscarder{}
diff --git a/services/keepstore/work_queue.go b/services/keepstore/work_queue.go
deleted file mode 100644
index 4c46ec8e65..0000000000
--- a/services/keepstore/work_queue.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-/* A WorkQueue is an asynchronous thread-safe queue manager. It
- provides a channel from which items can be read off the queue, and
- permits replacing the contents of the queue at any time.
-
- The overall work flow for a WorkQueue is as follows:
-
- 1. A WorkQueue is created with NewWorkQueue(). This
- function instantiates a new WorkQueue and starts a manager
- goroutine. The manager listens on an input channel
- (manager.newlist) and an output channel (manager.NextItem).
-
- 2. The manager first waits for a new list of requests on the
- newlist channel. When another goroutine calls
- manager.ReplaceQueue(lst), it sends lst over the newlist
- channel to the manager. The manager goroutine now has
- ownership of the list.
-
- 3. Once the manager has this initial list, it listens on both the
- input and output channels for one of the following to happen:
-
- a. A worker attempts to read an item from the NextItem
- channel. The manager sends the next item from the list
- over this channel to the worker, and loops.
-
- b. New data is sent to the manager on the newlist channel.
- This happens when another goroutine calls
- manager.ReplaceItem() with a new list. The manager
- discards the current list, replaces it with the new one,
- and begins looping again.
-
- c. The input channel is closed. The manager closes its
- output channel (signalling any workers to quit) and
- terminates.
-
- Tasks currently handled by WorkQueue:
- * the pull list
- * the trash list
-
- Example usage:
-
- // Any kind of user-defined type can be used with the
- // WorkQueue.
- type FrobRequest struct {
- frob string
- }
-
- // Make a work list.
- froblist := NewWorkQueue()
-
- // Start a concurrent worker to read items from the NextItem
- // channel until it is closed, deleting each one.
- go func(list WorkQueue) {
- for i := range list.NextItem {
- req := i.(FrobRequest)
- frob.Run(req)
- }
- }(froblist)
-
- // Set up a HTTP handler for PUT /frob
- router.HandleFunc(`/frob`,
- func(w http.ResponseWriter, req *http.Request) {
- // Parse the request body into a list.List
- // of FrobRequests, and give this list to the
- // frob manager.
- newfrobs := parseBody(req.Body)
- froblist.ReplaceQueue(newfrobs)
- }).Methods("PUT")
-
- Methods available on a WorkQueue:
-
- ReplaceQueue(list)
- Replaces the current item list with a new one. The list
- manager discards any unprocessed items on the existing
- list and replaces it with the new one. If the worker is
- processing a list item when ReplaceQueue is called, it
- finishes processing before receiving items from the new
- list.
- Close()
- Shuts down the manager goroutine. When Close is called,
- the manager closes the NextItem channel.
-*/
-
-import "container/list"
-
-// WorkQueue definition
-type WorkQueue struct {
- getStatus chan WorkQueueStatus
- newlist chan *list.List
- // Workers get work items by reading from this channel.
- NextItem <-chan interface{}
- // Each worker must send struct{}{} to DoneItem exactly once
- // for each work item received from NextItem, when it stops
- // working on that item (regardless of whether the work was
- // successful).
- DoneItem chan<- struct{}
-}
-
-// WorkQueueStatus reflects the queue status.
-type WorkQueueStatus struct {
- InProgress int
- Queued int
-}
-
-// NewWorkQueue returns a new empty WorkQueue.
-//
-func NewWorkQueue() *WorkQueue {
- nextItem := make(chan interface{})
- reportDone := make(chan struct{})
- newList := make(chan *list.List)
- b := WorkQueue{
- getStatus: make(chan WorkQueueStatus),
- newlist: newList,
- NextItem: nextItem,
- DoneItem: reportDone,
- }
- go func() {
- // Read new work lists from the newlist channel.
- // Reply to "status" and "get next item" queries by
- // sending to the getStatus and nextItem channels
- // respectively. Return when the newlist channel
- // closes.
-
- todo := &list.List{}
- status := WorkQueueStatus{}
-
- // When we're done, close the output channel; workers will
- // shut down next time they ask for new work.
- defer close(nextItem)
- defer close(b.getStatus)
-
- // nextChan and nextVal are both nil when we have
- // nothing to send; otherwise they are, respectively,
- // the nextItem channel and the next work item to send
- // to it.
- var nextChan chan interface{}
- var nextVal interface{}
-
- for newList != nil || status.InProgress > 0 {
- select {
- case p, ok := <-newList:
- if !ok {
- // Closed, stop receiving
- newList = nil
- }
- todo = p
- if todo == nil {
- todo = &list.List{}
- }
- status.Queued = todo.Len()
- if status.Queued == 0 {
- // Stop sending work
- nextChan = nil
- nextVal = nil
- } else {
- nextChan = nextItem
- nextVal = todo.Front().Value
- }
- case nextChan <- nextVal:
- todo.Remove(todo.Front())
- status.InProgress++
- status.Queued--
- if status.Queued == 0 {
- // Stop sending work
- nextChan = nil
- nextVal = nil
- } else {
- nextVal = todo.Front().Value
- }
- case <-reportDone:
- status.InProgress--
- case b.getStatus <- status:
- }
- }
- }()
- return &b
-}
-
-// ReplaceQueue abandons any work items left in the existing queue,
-// and starts giving workers items from the given list. After giving
-// it to ReplaceQueue, the caller must not read or write the given
-// list.
-//
-func (b *WorkQueue) ReplaceQueue(list *list.List) {
- b.newlist <- list
-}
-
-// Close shuts down the manager and terminates the goroutine, which
-// abandons any pending requests, but allows any pull request already
-// in progress to continue.
-//
-// After Close, Status will return correct values, NextItem will be
-// closed, and ReplaceQueue will panic.
-//
-func (b *WorkQueue) Close() {
- close(b.newlist)
-}
-
-// Status returns an up-to-date WorkQueueStatus reflecting the current
-// queue status.
-//
-func (b *WorkQueue) Status() WorkQueueStatus {
- // If the channel is closed, we get the nil value of
- // WorkQueueStatus, which is an accurate description of a
- // finished queue.
- return <-b.getStatus
-}
diff --git a/services/keepstore/work_queue_test.go b/services/keepstore/work_queue_test.go
deleted file mode 100644
index 254f96cb2d..0000000000
--- a/services/keepstore/work_queue_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
- "container/list"
- "runtime"
- "testing"
- "time"
-)
-
-type fatalfer interface {
- Fatalf(string, ...interface{})
-}
-
-func makeTestWorkList(ary []interface{}) *list.List {
- l := list.New()
- for _, n := range ary {
- l.PushBack(n)
- }
- return l
-}
-
-func expectChannelEmpty(t fatalfer, c <-chan interface{}) {
- select {
- case item, ok := <-c:
- if ok {
- t.Fatalf("Received value (%+v) from channel that we expected to be empty", item)
- }
- default:
- }
-}
-
-func expectChannelNotEmpty(t fatalfer, c <-chan interface{}) interface{} {
- select {
- case item, ok := <-c:
- if !ok {
- t.Fatalf("expected data on a closed channel")
- }
- return item
- case <-time.After(time.Second):
- t.Fatalf("expected data on an empty channel")
- return nil
- }
-}
-
-func expectChannelClosedWithin(t fatalfer, timeout time.Duration, c <-chan interface{}) {
- select {
- case received, ok := <-c:
- if ok {
- t.Fatalf("Expected channel to be closed, but received %+v instead", received)
- }
- case <-time.After(timeout):
- t.Fatalf("Expected channel to be closed, but it is still open after %v", timeout)
- }
-}
-
-func doWorkItems(t fatalfer, q *WorkQueue, expected []interface{}) {
- for i := range expected {
- actual, ok := <-q.NextItem
- if !ok {
- t.Fatalf("Expected %+v but channel was closed after receiving %+v as expected.", expected, expected[:i])
- }
- q.DoneItem <- struct{}{}
- if actual.(int) != expected[i] {
- t.Fatalf("Expected %+v but received %+v after receiving %+v as expected.", expected[i], actual, expected[:i])
- }
- }
-}
-
-func expectEqualWithin(t fatalfer, timeout time.Duration, expect interface{}, f func() interface{}) {
- ok := make(chan struct{})
- giveup := false
- go func() {
- for f() != expect && !giveup {
- time.Sleep(time.Millisecond)
- }
- close(ok)
- }()
- select {
- case <-ok:
- case <-time.After(timeout):
- giveup = true
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("Still getting %+v, timed out waiting for %+v\n%s:%d", f(), expect, file, line)
- }
-}
-
-func expectQueued(t fatalfer, b *WorkQueue, expectQueued int) {
- if l := b.Status().Queued; l != expectQueued {
- t.Fatalf("Got Queued==%d, expected %d", l, expectQueued)
- }
-}
-
-func TestWorkQueueDoneness(t *testing.T) {
- b := NewWorkQueue()
- defer b.Close()
- b.ReplaceQueue(makeTestWorkList([]interface{}{1, 2, 3}))
- expectQueued(t, b, 3)
- gate := make(chan struct{})
- go func() {
- <-gate
- for range b.NextItem {
- <-gate
- time.Sleep(time.Millisecond)
- b.DoneItem <- struct{}{}
- }
- }()
- expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
- b.ReplaceQueue(makeTestWorkList([]interface{}{4, 5, 6}))
- for i := 1; i <= 3; i++ {
- gate <- struct{}{}
- expectEqualWithin(t, time.Second, 3-i, func() interface{} { return b.Status().Queued })
- expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
- }
- close(gate)
- expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
- expectChannelEmpty(t, b.NextItem)
-}
-
-// Create a WorkQueue, generate a list for it, and instantiate a worker.
-func TestWorkQueueReadWrite(t *testing.T) {
- var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
- b := NewWorkQueue()
- expectQueued(t, b, 0)
-
- b.ReplaceQueue(makeTestWorkList(input))
- expectQueued(t, b, len(input))
-
- doWorkItems(t, b, input)
- expectChannelEmpty(t, b.NextItem)
- b.Close()
-}
-
-// Start a worker before the list has any input.
-func TestWorkQueueEarlyRead(t *testing.T) {
- var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
- b := NewWorkQueue()
- defer b.Close()
-
- // First, demonstrate that nothing is available on the NextItem
- // channel.
- expectChannelEmpty(t, b.NextItem)
-
- // Start a reader in a goroutine. The reader will block until the
- // block work list has been initialized.
- //
- done := make(chan int)
- go func() {
- doWorkItems(t, b, input)
- done <- 1
- }()
-
- // Feed the blocklist a new worklist, and wait for the worker to
- // finish.
- b.ReplaceQueue(makeTestWorkList(input))
- <-done
- expectQueued(t, b, 0)
-}
-
-// After Close(), NextItem closes, work finishes, then stats return zero.
-func TestWorkQueueClose(t *testing.T) {
- b := NewWorkQueue()
- input := []interface{}{1, 2, 3, 4, 5, 6, 7, 8}
- mark := make(chan struct{})
- go func() {
- <-b.NextItem
- mark <- struct{}{}
- <-mark
- b.DoneItem <- struct{}{}
- }()
- b.ReplaceQueue(makeTestWorkList(input))
- // Wait for worker to take item 1
- <-mark
- b.Close()
- expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
- // Tell worker to report done
- mark <- struct{}{}
- expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
- expectChannelClosedWithin(t, time.Second, b.NextItem)
-}
-
-// Show that a reader may block when the manager's list is exhausted,
-// and that the reader resumes automatically when new data is
-// available.
-func TestWorkQueueReaderBlocks(t *testing.T) {
- var (
- inputBeforeBlock = []interface{}{1, 2, 3, 4, 5}
- inputAfterBlock = []interface{}{6, 7, 8, 9, 10}
- )
-
- b := NewWorkQueue()
- defer b.Close()
- sendmore := make(chan int)
- done := make(chan int)
- go func() {
- doWorkItems(t, b, inputBeforeBlock)
-
- // Confirm that the channel is empty, so a subsequent read
- // on it will block.
- expectChannelEmpty(t, b.NextItem)
-
- // Signal that we're ready for more input.
- sendmore <- 1
- doWorkItems(t, b, inputAfterBlock)
- done <- 1
- }()
-
- // Write a slice of the first five elements and wait for the
- // reader to signal that it's ready for us to send more input.
- b.ReplaceQueue(makeTestWorkList(inputBeforeBlock))
- <-sendmore
-
- b.ReplaceQueue(makeTestWorkList(inputAfterBlock))
-
- // Wait for the reader to complete.
- <-done
-}
-
-// Replace one active work list with another.
-func TestWorkQueueReplaceQueue(t *testing.T) {
- var firstInput = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
- var replaceInput = []interface{}{1, 4, 9, 16, 25, 36, 49, 64, 81}
-
- b := NewWorkQueue()
- b.ReplaceQueue(makeTestWorkList(firstInput))
-
- // Read just the first five elements from the work list.
- // Confirm that the channel is not empty.
- doWorkItems(t, b, firstInput[0:5])
- expectChannelNotEmpty(t, b.NextItem)
-
- // Replace the work list and read five more elements.
- // The old list should have been discarded and all new
- // elements come from the new list.
- b.ReplaceQueue(makeTestWorkList(replaceInput))
- doWorkItems(t, b, replaceInput[0:5])
-
- b.Close()
-}
diff --git a/services/login-sync/Gemfile b/services/login-sync/Gemfile
index 420b152861..e49cd617f0 100644
--- a/services/login-sync/Gemfile
+++ b/services/login-sync/Gemfile
@@ -5,7 +5,7 @@
source 'https://rubygems.org'
gemspec
group :test, :performance do
- gem 'minitest', '>= 5.0.0'
- gem 'mocha', '>= 1.5.0', require: false
+ gem 'minitest', '>= 5'
+ gem 'mocha', '>= 2.1', require: false
gem 'rake'
end
diff --git a/services/login-sync/arvados-login-sync.gemspec b/services/login-sync/arvados-login-sync.gemspec
index f7fe4bc164..008f13d8b8 100644
--- a/services/login-sync/arvados-login-sync.gemspec
+++ b/services/login-sync/arvados-login-sync.gemspec
@@ -36,14 +36,15 @@ Gem::Specification.new do |s|
s.licenses = ['AGPL-3.0']
s.files = ["bin/arvados-login-sync", "agpl-3.0.txt"]
s.executables << "arvados-login-sync"
- s.required_ruby_version = '>= 2.1.0'
- s.add_runtime_dependency 'arvados', '>= 1.3.3.20190320201707'
+ s.required_ruby_version = '>= 2.5.0'
+ # The minimum version's 'a' suffix is necessary to enable bundler
+ # to consider 'pre-release' versions. See:
+ # https://github.com/rubygems/bundler/issues/4340
+ s.add_runtime_dependency 'arvados', '~> 2.8.a'
s.add_runtime_dependency 'launchy', '< 2.5'
- # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
- s.add_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
- # arvados-google-api-client (and thus arvados) gems
- # depend on signet, but signet 0.12 is incompatible with ruby 2.3.
- s.add_dependency('signet', '< 0.12')
+ # arvados fork of google-api-client gem with old API and new
+ # compatibility fixes, built from ../../sdk/ruby-google-api-client/
+ s.add_runtime_dependency('arvados-google-api-client', '>= 0.8.7.5', '< 0.8.9')
s.homepage =
'https://arvados.org'
end
diff --git a/services/login-sync/bin/arvados-login-sync b/services/login-sync/bin/arvados-login-sync
index 5c6691ab95..cbe8520a00 100755
--- a/services/login-sync/bin/arvados-login-sync
+++ b/services/login-sync/bin/arvados-login-sync
@@ -12,6 +12,18 @@ require 'yaml'
require 'optparse'
require 'open3'
+def ensure_dir(path, mode, owner, group)
+ begin
+ Dir.mkdir(path, mode)
+ rescue Errno::EEXIST
+ # No change needed
+ false
+ else
+ FileUtils.chown(owner, group, path)
+ true
+ end
+end
+
req_envs = %w(ARVADOS_API_HOST ARVADOS_API_TOKEN ARVADOS_VIRTUAL_MACHINE_UUID)
req_envs.each do |k|
unless ENV[k]
@@ -34,6 +46,15 @@ exclusive_banner = "############################################################
start_banner = "### BEGIN Arvados-managed keys -- changes between markers will be overwritten\n"
end_banner = "### END Arvados-managed keys -- changes between markers will be overwritten\n"
+actions = {
+ # These names correspond to the names in the cluster Users configuration.
+ # Managing everything was the original behavior.
+ SyncUserAccounts: true,
+ SyncUserGroups: true,
+ SyncUserSSHKeys: true,
+ SyncUserAPITokens: true,
+}
+
keys = ''
begin
@@ -42,9 +63,25 @@ begin
debug = true
end
arv = Arvados.new({ :suppress_ssl_warnings => false })
- logincluster_arv = Arvados.new({ :api_host => (ENV['LOGINCLUSTER_ARVADOS_API_HOST'] || ENV['ARVADOS_API_HOST']),
- :api_token => (ENV['LOGINCLUSTER_ARVADOS_API_TOKEN'] || ENV['ARVADOS_API_TOKEN']),
- :suppress_ssl_warnings => false })
+ logincluster_host = ENV['ARVADOS_API_HOST']
+ logincluster_name = arv.cluster_config['Login']['LoginCluster'] or ''
+
+ # Requiring the fuse group was previous hardcoded behavior
+ minimum_groups = arv.cluster_config['Users']['SyncRequiredGroups'] || ['fuse']
+ ignored_groups = arv.cluster_config['Users']['SyncIgnoredGroups'] || []
+ (minimum_groups & ignored_groups).each do |group_name|
+ STDERR.puts "WARNING: #{group_name} is listed in both SyncRequiredGroups and SyncIgnoredGroups. It will be ignored."
+ end
+
+ actions.each_pair do |key, default|
+ actions[key] = arv.cluster_config['Users'].fetch(key.to_s, default)
+ end
+
+ if logincluster_name != '' and logincluster_name != arv.cluster_config['ClusterID']
+ logincluster_host = arv.cluster_config['RemoteClusters'][logincluster_name]['Host']
+ end
+ logincluster_arv = Arvados.new({ :api_host => logincluster_host,
+ :suppress_ssl_warnings => false })
vm_uuid = ENV['ARVADOS_VIRTUAL_MACHINE_UUID']
@@ -107,11 +144,12 @@ begin
seen = Hash.new()
- current_user_groups = Hash.new
+ all_groups = []
+ current_user_groups = Hash.new { |hash, key| hash[key] = [] }
while (ent = Etc.getgrent()) do
+ all_groups << ent.name
ent.mem.each do |member|
- current_user_groups[member] ||= Array.new
- current_user_groups[member].push ent.name
+ current_user_groups[member] << ent.name
end
end
Etc.endgrent()
@@ -123,6 +161,10 @@ begin
username = l[:username]
unless pwnam[l[:username]]
+ unless actions[:SyncUserAccounts]
+ STDERR.puts "User #{username} does not exist and SyncUserAccounts=false. Skipping."
+ next
+ end
STDERR.puts "Creating account #{l[:username]}"
# Create new user
out, st = Open3.capture2e("useradd", "-m",
@@ -141,15 +183,21 @@ begin
end
end
- existing_groups = current_user_groups[username] || []
- groups = l[:groups] || []
- # Adding users to the FUSE group has long been hardcoded behavior.
- groups << "fuse"
- groups << username
- groups.select! { |g| Etc.getgrnam(g) rescue false }
+ user_gid = pwnam[username].gid
+ homedir = pwnam[l[:username]].dir
+ if !File.exist?(homedir)
+ STDERR.puts "Cannot set up user #{username} because their home directory #{homedir} does not exist. Skipping."
+ next
+ end
+
+ if actions[:SyncUserGroups]
+ have_groups = current_user_groups[username] - ignored_groups
+ want_groups = l[:groups] || []
+ want_groups |= minimum_groups
+ want_groups -= ignored_groups
+ want_groups &= all_groups
- groups.each do |addgroup|
- if existing_groups.index(addgroup).nil?
+ (want_groups - have_groups).each do |addgroup|
# User should be in group, but isn't, so add them.
STDERR.puts "Add user #{username} to #{addgroup} group"
out, st = Open3.capture2e("usermod", "-aG", addgroup, username)
@@ -157,10 +205,8 @@ begin
STDERR.puts "Failed to add #{username} to #{addgroup} group:\n#{out}"
end
end
- end
- existing_groups.each do |removegroup|
- if groups.index(removegroup).nil?
+ (have_groups - want_groups).each do |removegroup|
# User is in a group, but shouldn't be, so remove them.
STDERR.puts "Remove user #{username} from #{removegroup} group"
out, st = Open3.capture2e("gpasswd", "-d", username, removegroup)
@@ -170,96 +216,86 @@ begin
end
end
- homedir = pwnam[l[:username]].dir
- userdotssh = File.join(homedir, ".ssh")
- Dir.mkdir(userdotssh) if !File.exist?(userdotssh)
+ if actions[:SyncUserSSHKeys]
+ userdotssh = File.join(homedir, ".ssh")
+ ensure_dir(userdotssh, 0700, username, user_gid)
- newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
+ newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
- keysfile = File.join(userdotssh, "authorized_keys")
+ keysfile = File.join(userdotssh, "authorized_keys")
+ begin
+ oldkeys = File.read(keysfile)
+ rescue Errno::ENOENT
+ oldkeys = ""
+ end
- if File.exist?(keysfile)
- oldkeys = IO::read(keysfile)
- else
- oldkeys = ""
- end
+ if options[:exclusive]
+ newkeys = exclusive_banner + newkeys
+ elsif oldkeys.start_with?(exclusive_banner)
+ newkeys = start_banner + newkeys + end_banner
+ elsif (m = /^(.*?\n|)#{start_banner}(.*?\n|)#{end_banner}(.*)/m.match(oldkeys))
+ newkeys = m[1] + start_banner + newkeys + end_banner + m[3]
+ else
+ newkeys = start_banner + newkeys + end_banner + oldkeys
+ end
- if options[:exclusive]
- newkeys = exclusive_banner + newkeys
- elsif oldkeys.start_with?(exclusive_banner)
- newkeys = start_banner + newkeys + end_banner
- elsif (m = /^(.*?\n|)#{start_banner}(.*?\n|)#{end_banner}(.*)/m.match(oldkeys))
- newkeys = m[1] + start_banner + newkeys + end_banner + m[3]
- else
- newkeys = start_banner + newkeys + end_banner + oldkeys
+ if oldkeys != newkeys then
+ File.open(keysfile, 'w', 0600) do |f|
+ f.write(newkeys)
+ end
+ FileUtils.chown(username, user_gid, keysfile)
+ end
end
- if oldkeys != newkeys then
- f = File.new(keysfile, 'w')
- f.write(newkeys)
- f.close()
- end
+ if actions[:SyncUserAPITokens]
+ userdotconfig = File.join(homedir, ".config")
+ ensure_dir(userdotconfig, 0755, username, user_gid)
+ configarvados = File.join(userdotconfig, "arvados")
+ ensure_dir(configarvados, 0700, username, user_gid)
- userdotconfig = File.join(homedir, ".config")
- if !File.exist?(userdotconfig)
- Dir.mkdir(userdotconfig)
- end
+ tokenfile = File.join(configarvados, "settings.conf")
- configarvados = File.join(userdotconfig, "arvados")
- Dir.mkdir(configarvados) if !File.exist?(configarvados)
-
- tokenfile = File.join(configarvados, "settings.conf")
-
- begin
- STDERR.puts "Processing #{tokenfile} ..." if debug
- newToken = false
- if File.exist?(tokenfile)
- # check if the token is still valid
- myToken = ENV["ARVADOS_API_TOKEN"]
- userEnv = IO::read(tokenfile)
- if (m = /^ARVADOS_API_TOKEN=(.*?\n)/m.match(userEnv))
- begin
- tmp_arv = Arvados.new({ :api_host => (ENV['LOGINCLUSTER_ARVADOS_API_HOST'] || ENV['ARVADOS_API_HOST']),
- :api_token => (m[1]),
- :suppress_ssl_warnings => false })
- tmp_arv.user.current
- rescue Arvados::TransactionFailedError => e
- if e.to_s =~ /401 Unauthorized/
- STDERR.puts "Account #{l[:username]} token not valid, creating new token."
- newToken = true
- else
- raise
+ begin
+ STDERR.puts "Processing #{tokenfile} ..." if debug
+ newToken = false
+ if File.exist?(tokenfile)
+ # check if the token is still valid
+ myToken = ENV["ARVADOS_API_TOKEN"]
+ userEnv = File.read(tokenfile)
+ if (m = /^ARVADOS_API_TOKEN=(.*?\n)/m.match(userEnv))
+ begin
+ tmp_arv = Arvados.new({ :api_host => logincluster_host,
+ :api_token => (m[1]),
+ :suppress_ssl_warnings => false })
+ tmp_arv.user.current
+ rescue Arvados::TransactionFailedError => e
+ if e.to_s =~ /401 Unauthorized/
+ STDERR.puts "Account #{l[:username]} token not valid, creating new token."
+ newToken = true
+ else
+ raise
+ end
end
end
+ elsif !File.exist?(tokenfile) || options[:"rotate-tokens"]
+ STDERR.puts "Account #{l[:username]} token file not found, creating new token."
+ newToken = true
end
- elsif !File.exist?(tokenfile) || options[:"rotate-tokens"]
- STDERR.puts "Account #{l[:username]} token file not found, creating new token."
- newToken = true
- end
- if newToken
- aca_params = {owner_uuid: l[:user_uuid], api_client_id: 0}
- if options[:"token-lifetime"] && options[:"token-lifetime"] > 0
- aca_params.merge!(expires_at: (Time.now + options[:"token-lifetime"]))
+ if newToken
+ aca_params = {owner_uuid: l[:user_uuid], api_client_id: 0}
+ if options[:"token-lifetime"] && options[:"token-lifetime"] > 0
+ aca_params.merge!(expires_at: (Time.now + options[:"token-lifetime"]))
+ end
+ user_token = logincluster_arv.api_client_authorization.create(api_client_authorization: aca_params)
+ File.open(tokenfile, 'w', 0600) do |f|
+ f.write("ARVADOS_API_HOST=#{ENV['ARVADOS_API_HOST']}\n")
+ f.write("ARVADOS_API_TOKEN=v2/#{user_token[:uuid]}/#{user_token[:api_token]}\n")
+ end
+ FileUtils.chown(username, user_gid, tokenfile)
end
- user_token = logincluster_arv.api_client_authorization.create(api_client_authorization: aca_params)
- f = File.new(tokenfile, 'w')
- f.write("ARVADOS_API_HOST=#{ENV['ARVADOS_API_HOST']}\n")
- f.write("ARVADOS_API_TOKEN=v2/#{user_token[:uuid]}/#{user_token[:api_token]}\n")
- f.close()
+ rescue => e
+ STDERR.puts "Error setting token for #{l[:username]}: #{e}"
end
- rescue => e
- STDERR.puts "Error setting token for #{l[:username]}: #{e}"
- end
-
- FileUtils.chown_R(l[:username], nil, userdotssh)
- FileUtils.chown_R(l[:username], nil, userdotconfig)
- File.chmod(0700, userdotssh)
- File.chmod(0700, userdotconfig)
- File.chmod(0700, configarvados)
- File.chmod(0750, homedir)
- File.chmod(0600, keysfile)
- if File.exist?(tokenfile)
- File.chmod(0600, tokenfile)
end
end
diff --git a/services/workbench2/.env b/services/workbench2/.env
new file mode 100644
index 0000000000..fd91b99c6a
--- /dev/null
+++ b/services/workbench2/.env
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+REACT_APP_ARVADOS_CONFIG_URL=/config.json
+REACT_APP_ARVADOS_API_HOST=c97qk.arvadosapi.com
+HTTPS=true
\ No newline at end of file
diff --git a/services/workbench2/.gitignore b/services/workbench2/.gitignore
new file mode 100644
index 0000000000..9093202ff8
--- /dev/null
+++ b/services/workbench2/.gitignore
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+# See https://help.github.com/ignore-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+
+# vscode
+/.vs
+
+# testing
+/coverage
+/cypress/videos
+/cypress/screenshots
+/cypress/downloads
+
+# production
+/build
+
+# misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+.npm.local
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+.idea
+.vscode
+.eslintcache
+/public/config.json
+/public/_health/
+
+# see https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/sdks
+!.yarn/versions
diff --git a/services/workbench2/.npmrc b/services/workbench2/.npmrc
new file mode 100644
index 0000000000..cffe8cdef1
--- /dev/null
+++ b/services/workbench2/.npmrc
@@ -0,0 +1 @@
+save-exact=true
diff --git a/services/workbench2/.yarn/releases/yarn-3.2.0.cjs b/services/workbench2/.yarn/releases/yarn-3.2.0.cjs
new file mode 100755
index 0000000000..b30d0655d0
--- /dev/null
+++ b/services/workbench2/.yarn/releases/yarn-3.2.0.cjs
@@ -0,0 +1,785 @@
+#!/usr/bin/env node
+/* eslint-disable */
+//prettier-ignore
+(()=>{var afe=Object.create,Oh=Object.defineProperty,Afe=Object.defineProperties,lfe=Object.getOwnPropertyDescriptor,cfe=Object.getOwnPropertyDescriptors,ufe=Object.getOwnPropertyNames,OE=Object.getOwnPropertySymbols,gfe=Object.getPrototypeOf,lQ=Object.prototype.hasOwnProperty,iM=Object.prototype.propertyIsEnumerable;var nM=(t,e,r)=>e in t?Oh(t,e,{enumerable:!0,configurable:!0,writable:!0,value:r}):t[e]=r,N=(t,e)=>{for(var r in e||(e={}))lQ.call(e,r)&&nM(t,r,e[r]);if(OE)for(var r of OE(e))iM.call(e,r)&&nM(t,r,e[r]);return t},te=(t,e)=>Afe(t,cfe(e)),ffe=t=>Oh(t,"__esModule",{value:!0});var Tr=(t,e)=>{var r={};for(var i in t)lQ.call(t,i)&&e.indexOf(i)<0&&(r[i]=t[i]);if(t!=null&&OE)for(var i of OE(t))e.indexOf(i)<0&&iM.call(t,i)&&(r[i]=t[i]);return r},hfe=(t,e)=>()=>(t&&(e=t(t=0)),e),w=(t,e)=>()=>(e||t((e={exports:{}}).exports,e),e.exports),ft=(t,e)=>{for(var r in e)Oh(t,r,{get:e[r],enumerable:!0})},pfe=(t,e,r)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of ufe(e))!lQ.call(t,i)&&i!=="default"&&Oh(t,i,{get:()=>e[i],enumerable:!(r=lfe(e,i))||r.enumerable});return t},ge=t=>pfe(ffe(Oh(t!=null?afe(gfe(t)):{},"default",t&&t.__esModule&&"default"in t?{get:()=>t.default,enumerable:!0}:{value:t,enumerable:!0})),t);var PM=w(($Xe,vM)=>{vM.exports=SM;SM.sync=Rfe;var kM=require("fs");function Ffe(t,e){var r=e.pathExt!==void 0?e.pathExt:process.env.PATHEXT;if(!r||(r=r.split(";"),r.indexOf("")!==-1))return!0;for(var i=0;i{DM.exports=RM;RM.sync=Nfe;var FM=require("fs");function RM(t,e,r){FM.stat(t,function(i,n){r(i,i?!1:NM(n,e))})}function Nfe(t,e){return NM(FM.statSync(t),e)}function NM(t,e){return t.isFile()&&Lfe(t,e)}function Lfe(t,e){var r=t.mode,i=t.uid,n=t.gid,s=e.uid!==void 0?e.uid:process.getuid&&process.getuid(),o=e.gid!==void 0?e.gid:process.getgid&&process.getgid(),a=parseInt("100",8),l=parseInt("010",8),c=parseInt("001",8),u=a|l,g=r&c||r&l&&n===o||r&a&&i===s||r&u&&s===0;return g}});var OM=w((rZe,TM)=>{var 
tZe=require("fs"),XE;process.platform==="win32"||global.TESTING_WINDOWS?XE=PM():XE=LM();TM.exports=vQ;vQ.sync=Tfe;function vQ(t,e,r){if(typeof e=="function"&&(r=e,e={}),!r){if(typeof Promise!="function")throw new TypeError("callback not provided");return new Promise(function(i,n){vQ(t,e||{},function(s,o){s?n(s):i(o)})})}XE(t,e||{},function(i,n){i&&(i.code==="EACCES"||e&&e.ignoreErrors)&&(i=null,n=!1),r(i,n)})}function Tfe(t,e){try{return XE.sync(t,e||{})}catch(r){if(e&&e.ignoreErrors||r.code==="EACCES")return!1;throw r}}});var YM=w((iZe,MM)=>{var Ju=process.platform==="win32"||process.env.OSTYPE==="cygwin"||process.env.OSTYPE==="msys",UM=require("path"),Ofe=Ju?";":":",KM=OM(),HM=t=>Object.assign(new Error(`not found: ${t}`),{code:"ENOENT"}),jM=(t,e)=>{let r=e.colon||Ofe,i=t.match(/\//)||Ju&&t.match(/\\/)?[""]:[...Ju?[process.cwd()]:[],...(e.path||process.env.PATH||"").split(r)],n=Ju?e.pathExt||process.env.PATHEXT||".EXE;.CMD;.BAT;.COM":"",s=Ju?n.split(r):[""];return Ju&&t.indexOf(".")!==-1&&s[0]!==""&&s.unshift(""),{pathEnv:i,pathExt:s,pathExtExe:n}},GM=(t,e,r)=>{typeof e=="function"&&(r=e,e={}),e||(e={});let{pathEnv:i,pathExt:n,pathExtExe:s}=jM(t,e),o=[],a=c=>new Promise((u,g)=>{if(c===i.length)return e.all&&o.length?u(o):g(HM(t));let f=i[c],h=/^".*"$/.test(f)?f.slice(1,-1):f,p=UM.join(h,t),m=!h&&/^\.[\\\/]/.test(t)?t.slice(0,2)+p:p;u(l(m,c,0))}),l=(c,u,g)=>new Promise((f,h)=>{if(g===n.length)return f(a(u+1));let p=n[g];KM(c+p,{pathExt:s},(m,y)=>{if(!m&&y)if(e.all)o.push(c+p);else return f(c+p);return f(l(c,u,g+1))})});return r?a(0).then(c=>r(null,c),r):a(0)},Mfe=(t,e)=>{e=e||{};let{pathEnv:r,pathExt:i,pathExtExe:n}=jM(t,e),s=[];for(let o=0;o{"use strict";var qM=(t={})=>{let e=t.env||process.env;return(t.platform||process.platform)!=="win32"?"PATH":Object.keys(e).reverse().find(i=>i.toUpperCase()==="PATH")||"Path"};SQ.exports=qM;SQ.exports.default=qM});var VM=w((sZe,WM)=>{"use strict";var zM=require("path"),Ufe=YM(),Kfe=JM();function _M(t,e){let 
r=t.options.env||process.env,i=process.cwd(),n=t.options.cwd!=null,s=n&&process.chdir!==void 0&&!process.chdir.disabled;if(s)try{process.chdir(t.options.cwd)}catch(a){}let o;try{o=Ufe.sync(t.command,{path:r[Kfe({env:r})],pathExt:e?zM.delimiter:void 0})}catch(a){}finally{s&&process.chdir(i)}return o&&(o=zM.resolve(n?t.options.cwd:"",o)),o}function Hfe(t){return _M(t)||_M(t,!0)}WM.exports=Hfe});var XM=w((oZe,kQ)=>{"use strict";var xQ=/([()\][%!^"`<>&|;, *?])/g;function jfe(t){return t=t.replace(xQ,"^$1"),t}function Gfe(t,e){return t=`${t}`,t=t.replace(/(\\*)"/g,'$1$1\\"'),t=t.replace(/(\\*)$/,"$1$1"),t=`"${t}"`,t=t.replace(xQ,"^$1"),e&&(t=t.replace(xQ,"^$1")),t}kQ.exports.command=jfe;kQ.exports.argument=Gfe});var $M=w((aZe,ZM)=>{"use strict";ZM.exports=/^#!(.*)/});var t1=w((AZe,e1)=>{"use strict";var Yfe=$M();e1.exports=(t="")=>{let e=t.match(Yfe);if(!e)return null;let[r,i]=e[0].replace(/#! ?/,"").split(" "),n=r.split("/").pop();return n==="env"?i:i?`${n} ${i}`:n}});var i1=w((lZe,r1)=>{"use strict";var PQ=require("fs"),qfe=t1();function Jfe(t){let e=150,r=Buffer.alloc(e),i;try{i=PQ.openSync(t,"r"),PQ.readSync(i,r,0,e,0),PQ.closeSync(i)}catch(n){}return qfe(r.toString())}r1.exports=Jfe});var a1=w((cZe,n1)=>{"use strict";var Wfe=require("path"),s1=VM(),o1=XM(),zfe=i1(),_fe=process.platform==="win32",Vfe=/\.(?:com|exe)$/i,Xfe=/node_modules[\\/].bin[\\/][^\\/]+\.cmd$/i;function Zfe(t){t.file=s1(t);let e=t.file&&zfe(t.file);return e?(t.args.unshift(t.file),t.command=e,s1(t)):t.file}function $fe(t){if(!_fe)return t;let e=Zfe(t),r=!Vfe.test(e);if(t.options.forceShell||r){let i=Xfe.test(e);t.command=Wfe.normalize(t.command),t.command=o1.command(t.command),t.args=t.args.map(s=>o1.argument(s,i));let n=[t.command].concat(t.args).join(" ");t.args=["/d","/s","/c",`"${n}"`],t.command=process.env.comspec||"cmd.exe",t.options.windowsVerbatimArguments=!0}return t}function ehe(t,e,r){e&&!Array.isArray(e)&&(r=e,e=null),e=e?e.slice(0):[],r=Object.assign({},r);let 
i={command:t,args:e,options:r,file:void 0,original:{command:t,args:e}};return r.shell?i:$fe(i)}n1.exports=ehe});var c1=w((uZe,A1)=>{"use strict";var DQ=process.platform==="win32";function RQ(t,e){return Object.assign(new Error(`${e} ${t.command} ENOENT`),{code:"ENOENT",errno:"ENOENT",syscall:`${e} ${t.command}`,path:t.command,spawnargs:t.args})}function the(t,e){if(!DQ)return;let r=t.emit;t.emit=function(i,n){if(i==="exit"){let s=l1(n,e,"spawn");if(s)return r.call(t,"error",s)}return r.apply(t,arguments)}}function l1(t,e){return DQ&&t===1&&!e.file?RQ(e.original,"spawn"):null}function rhe(t,e){return DQ&&t===1&&!e.file?RQ(e.original,"spawnSync"):null}A1.exports={hookChildProcess:the,verifyENOENT:l1,verifyENOENTSync:rhe,notFoundError:RQ}});var LQ=w((gZe,Wu)=>{"use strict";var u1=require("child_process"),FQ=a1(),NQ=c1();function g1(t,e,r){let i=FQ(t,e,r),n=u1.spawn(i.command,i.args,i.options);return NQ.hookChildProcess(n,i),n}function ihe(t,e,r){let i=FQ(t,e,r),n=u1.spawnSync(i.command,i.args,i.options);return n.error=n.error||NQ.verifyENOENTSync(n.status,i),n}Wu.exports=g1;Wu.exports.spawn=g1;Wu.exports.sync=ihe;Wu.exports._parse=FQ;Wu.exports._enoent=NQ});var h1=w((fZe,f1)=>{"use strict";function nhe(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function nc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,nc)}nhe(nc,Error);nc.buildMessage=function(t,e){var r={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;g>",ce=Ce(">>",!1),fe=">&",gt=Ce(">&",!1),Ht=">",Mt=Ce(">",!1),mi="<<<",jt=Ce("<<<",!1),Qr="<&",Ti=Ce("<&",!1),_s="<",Un=Ce("<",!1),Kn=function(C){return{type:"argument",segments:[].concat(...C)}},vr=function(C){return 
C},Hn="$'",us=Ce("$'",!1),Ia="'",SA=Ce("'",!1),Du=function(C){return[{type:"text",text:C}]},gs='""',kA=Ce('""',!1),ya=function(){return{type:"text",text:""}},Ru='"',xA=Ce('"',!1),PA=function(C){return C},Sr=function(C){return{type:"arithmetic",arithmetic:C,quoted:!0}},jl=function(C){return{type:"shell",shell:C,quoted:!0}},Fu=function(C){return te(N({type:"variable"},C),{quoted:!0})},So=function(C){return{type:"text",text:C}},Nu=function(C){return{type:"arithmetic",arithmetic:C,quoted:!1}},Qh=function(C){return{type:"shell",shell:C,quoted:!1}},vh=function(C){return te(N({type:"variable"},C),{quoted:!1})},oe=function(C){return{type:"glob",pattern:C}},Oi=/^[^']/,ko=_e(["'"],!0,!1),jn=function(C){return C.join("")},Lu=/^[^$"]/,vt=_e(["$",'"'],!0,!1),Gl=`\\
+`,Gn=Ce(`\\
+`,!1),fs=function(){return""},hs="\\",pt=Ce("\\",!1),xo=/^[\\$"`]/,lt=_e(["\\","$",'"',"`"],!1,!1),mn=function(C){return C},v="\\a",Tt=Ce("\\a",!1),Tu=function(){return"a"},Yl="\\b",Sh=Ce("\\b",!1),kh=function(){return"\b"},xh=/^[Ee]/,Ph=_e(["E","e"],!1,!1),Dh=function(){return""},G="\\f",yt=Ce("\\f",!1),DA=function(){return"\f"},$i="\\n",ql=Ce("\\n",!1),$e=function(){return`
+`},wa="\\r",Ou=Ce("\\r",!1),SE=function(){return"\r"},Rh="\\t",kE=Ce("\\t",!1),gr=function(){return" "},Yn="\\v",Jl=Ce("\\v",!1),Fh=function(){return"\v"},Vs=/^[\\'"?]/,Ba=_e(["\\","'",'"',"?"],!1,!1),En=function(C){return String.fromCharCode(parseInt(C,16))},Oe="\\x",Mu=Ce("\\x",!1),Wl="\\u",Xs=Ce("\\u",!1),zl="\\U",RA=Ce("\\U",!1),Uu=function(C){return String.fromCodePoint(parseInt(C,16))},Ku=/^[0-7]/,ba=_e([["0","7"]],!1,!1),Qa=/^[0-9a-fA-f]/,it=_e([["0","9"],["a","f"],["A","f"]],!1,!1),Po=ot(),FA="-",_l=Ce("-",!1),Zs="+",Vl=Ce("+",!1),xE=".",Nh=Ce(".",!1),Hu=function(C,b,F){return{type:"number",value:(C==="-"?-1:1)*parseFloat(b.join("")+"."+F.join(""))}},Lh=function(C,b){return{type:"number",value:(C==="-"?-1:1)*parseInt(b.join(""))}},PE=function(C){return N({type:"variable"},C)},Xl=function(C){return{type:"variable",name:C}},DE=function(C){return C},ju="*",NA=Ce("*",!1),Lr="/",RE=Ce("/",!1),$s=function(C,b,F){return{type:b==="*"?"multiplication":"division",right:F}},eo=function(C,b){return b.reduce((F,H)=>N({left:F},H),C)},Gu=function(C,b,F){return{type:b==="+"?"addition":"subtraction",right:F}},LA="$((",R=Ce("$((",!1),q="))",de=Ce("))",!1),He=function(C){return C},Te="$(",Xe=Ce("$(",!1),Et=function(C){return C},Rt="${",qn=Ce("${",!1),Jb=":-",xO=Ce(":-",!1),PO=function(C,b){return{name:C,defaultValue:b}},Wb=":-}",DO=Ce(":-}",!1),RO=function(C){return{name:C,defaultValue:[]}},zb=":+",FO=Ce(":+",!1),NO=function(C,b){return{name:C,alternativeValue:b}},_b=":+}",LO=Ce(":+}",!1),TO=function(C){return{name:C,alternativeValue:[]}},Vb=function(C){return{name:C}},OO="$",MO=Ce("$",!1),UO=function(C){return e.isGlobPattern(C)},KO=function(C){return C},Xb=/^[a-zA-Z0-9_]/,Zb=_e([["a","z"],["A","Z"],["0","9"],"_"],!1,!1),$b=function(){return O()},eQ=/^[$@*?#a-zA-Z0-9_\-]/,tQ=_e(["$","@","*","?","#",["a","z"],["A","Z"],["0","9"],"_","-"],!1,!1),HO=/^[(){}<>$|&; \t"']/,Yu=_e(["(",")","{","}","<",">","$","|","&",";"," "," ",'"',"'"],!1,!1),rQ=/^[<>&; 
\t"']/,iQ=_e(["<",">","&",";"," "," ",'"',"'"],!1,!1),FE=/^[ \t]/,NE=_e([" "," "],!1,!1),B=0,Ke=0,TA=[{line:1,column:1}],d=0,E=[],I=0,D;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function O(){return t.substring(Ke,B)}function V(){return It(Ke,B)}function ie(C,b){throw b=b!==void 0?b:It(Ke,B),Mi([ut(C)],t.substring(Ke,B),b)}function Be(C,b){throw b=b!==void 0?b:It(Ke,B),Jn(C,b)}function Ce(C,b){return{type:"literal",text:C,ignoreCase:b}}function _e(C,b,F){return{type:"class",parts:C,inverted:b,ignoreCase:F}}function ot(){return{type:"any"}}function wt(){return{type:"end"}}function ut(C){return{type:"other",description:C}}function nt(C){var b=TA[C],F;if(b)return b;for(F=C-1;!TA[F];)F--;for(b=TA[F],b={line:b.line,column:b.column};Fd&&(d=B,E=[]),E.push(C))}function Jn(C,b){return new nc(C,null,null,b)}function Mi(C,b,F){return new nc(nc.buildMessage(C,b),C,b,F)}function OA(){var C,b;return C=B,b=Gr(),b===r&&(b=null),b!==r&&(Ke=C,b=s(b)),C=b,C}function Gr(){var C,b,F,H,ue;if(C=B,b=Yr(),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();F!==r?(H=va(),H!==r?(ue=ps(),ue===r&&(ue=null),ue!==r?(Ke=C,b=o(b,H,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;if(C===r)if(C=B,b=Yr(),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();F!==r?(H=va(),H===r&&(H=null),H!==r?(Ke=C,b=a(b,H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;return C}function ps(){var C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=Gr(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=l(F),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;return C}function va(){var C;return t.charCodeAt(B)===59?(C=c,B++):(C=r,I===0&&ke(u)),C===r&&(t.charCodeAt(B)===38?(C=g,B++):(C=r,I===0&&ke(f))),C}function Yr(){var C,b,F;return C=B,b=jO(),b!==r?(F=Yge(),F===r&&(F=null),F!==r?(Ke=C,b=h(b,F),C=b):(B=C,C=r)):(B=C,C=r),C}function Yge(){var 
C,b,F,H,ue,De,Ct;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=qge(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Yr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=p(F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;return C}function qge(){var C;return t.substr(B,2)===m?(C=m,B+=2):(C=r,I===0&&ke(y)),C===r&&(t.substr(B,2)===Q?(C=Q,B+=2):(C=r,I===0&&ke(S))),C}function jO(){var C,b,F;return C=B,b=zge(),b!==r?(F=Jge(),F===r&&(F=null),F!==r?(Ke=C,b=x(b,F),C=b):(B=C,C=r)):(B=C,C=r),C}function Jge(){var C,b,F,H,ue,De,Ct;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=Wge(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=jO(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=M(F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;return C}function Wge(){var C;return t.substr(B,2)===Y?(C=Y,B+=2):(C=r,I===0&&ke(U)),C===r&&(t.charCodeAt(B)===124?(C=J,B++):(C=r,I===0&&ke(W))),C}function LE(){var C,b,F,H,ue,De;if(C=B,b=eM(),b!==r)if(t.charCodeAt(B)===61?(F=ee,B++):(F=r,I===0&&ke(Z)),F!==r)if(H=qO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(Ke=C,b=A(b,H),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;else B=C,C=r;if(C===r)if(C=B,b=eM(),b!==r)if(t.charCodeAt(B)===61?(F=ee,B++):(F=r,I===0&&ke(Z)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=ne(b),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;return C}function zge(){var 
C,b,F,H,ue,De,Ct,bt,Zr,Ei,ds;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(t.charCodeAt(B)===40?(F=le,B++):(F=r,I===0&&ke(Ae)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Gr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();if(De!==r)if(t.charCodeAt(B)===41?(Ct=T,B++):(Ct=r,I===0&&ke(L)),Ct!==r){for(bt=[],Zr=je();Zr!==r;)bt.push(Zr),Zr=je();if(bt!==r){for(Zr=[],Ei=Th();Ei!==r;)Zr.push(Ei),Ei=Th();if(Zr!==r){for(Ei=[],ds=je();ds!==r;)Ei.push(ds),ds=je();Ei!==r?(Ke=C,b=Ee(ue,Zr),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(t.charCodeAt(B)===123?(F=we,B++):(F=r,I===0&&ke(qe)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Gr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();if(De!==r)if(t.charCodeAt(B)===125?(Ct=re,B++):(Ct=r,I===0&&ke(se)),Ct!==r){for(bt=[],Zr=je();Zr!==r;)bt.push(Zr),Zr=je();if(bt!==r){for(Zr=[],Ei=Th();Ei!==r;)Zr.push(Ei),Ei=Th();if(Zr!==r){for(Ei=[],ds=je();ds!==r;)Ei.push(ds),ds=je();Ei!==r?(Ke=C,b=Qe(ue,Zr),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){for(F=[],H=LE();H!==r;)F.push(H),H=LE();if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r){if(ue=[],De=YO(),De!==r)for(;De!==r;)ue.push(De),De=YO();else ue=r;if(ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=he(F,ue),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r}else B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){if(F=[],H=LE(),H!==r)for(;H!==r;)F.push(H),H=LE();else F=r;if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=Fe(F),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}}}return C}function GO(){var 
C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){if(F=[],H=TE(),H!==r)for(;H!==r;)F.push(H),H=TE();else F=r;if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=Ue(F),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r;return C}function YO(){var C,b,F;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r?(F=Th(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r),C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();b!==r?(F=TE(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r)}return C}function Th(){var C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();return b!==r?(ve.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(pe)),F===r&&(F=null),F!==r?(H=_ge(),H!==r?(ue=TE(),ue!==r?(Ke=C,b=X(F,H,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function _ge(){var C;return t.substr(B,2)===be?(C=be,B+=2):(C=r,I===0&&ke(ce)),C===r&&(t.substr(B,2)===fe?(C=fe,B+=2):(C=r,I===0&&ke(gt)),C===r&&(t.charCodeAt(B)===62?(C=Ht,B++):(C=r,I===0&&ke(Mt)),C===r&&(t.substr(B,3)===mi?(C=mi,B+=3):(C=r,I===0&&ke(jt)),C===r&&(t.substr(B,2)===Qr?(C=Qr,B+=2):(C=r,I===0&&ke(Ti)),C===r&&(t.charCodeAt(B)===60?(C=_s,B++):(C=r,I===0&&ke(Un))))))),C}function TE(){var C,b,F;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();return b!==r?(F=qO(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r),C}function qO(){var C,b,F;if(C=B,b=[],F=JO(),F!==r)for(;F!==r;)b.push(F),F=JO();else b=r;return b!==r&&(Ke=C,b=Kn(b)),C=b,C}function JO(){var C,b;return C=B,b=Vge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=Xge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=Zge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=$ge(),b!==r&&(Ke=C,b=vr(b)),C=b))),C}function Vge(){var C,b,F,H;return C=B,t.substr(B,2)===Hn?(b=Hn,B+=2):(b=r,I===0&&ke(us)),b!==r?(F=rfe(),F!==r?(t.charCodeAt(B)===39?(H=Ia,B++):(H=r,I===0&&ke(SA)),H!==r?(Ke=C,b=Du(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function Xge(){var C,b,F,H;return 
C=B,t.charCodeAt(B)===39?(b=Ia,B++):(b=r,I===0&&ke(SA)),b!==r?(F=efe(),F!==r?(t.charCodeAt(B)===39?(H=Ia,B++):(H=r,I===0&&ke(SA)),H!==r?(Ke=C,b=Du(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function Zge(){var C,b,F,H;if(C=B,t.substr(B,2)===gs?(b=gs,B+=2):(b=r,I===0&&ke(kA)),b!==r&&(Ke=C,b=ya()),C=b,C===r)if(C=B,t.charCodeAt(B)===34?(b=Ru,B++):(b=r,I===0&&ke(xA)),b!==r){for(F=[],H=WO();H!==r;)F.push(H),H=WO();F!==r?(t.charCodeAt(B)===34?(H=Ru,B++):(H=r,I===0&&ke(xA)),H!==r?(Ke=C,b=PA(F),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;return C}function $ge(){var C,b,F;if(C=B,b=[],F=zO(),F!==r)for(;F!==r;)b.push(F),F=zO();else b=r;return b!==r&&(Ke=C,b=PA(b)),C=b,C}function WO(){var C,b;return C=B,b=ZO(),b!==r&&(Ke=C,b=Sr(b)),C=b,C===r&&(C=B,b=$O(),b!==r&&(Ke=C,b=jl(b)),C=b,C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=Fu(b)),C=b,C===r&&(C=B,b=tfe(),b!==r&&(Ke=C,b=So(b)),C=b))),C}function zO(){var C,b;return C=B,b=ZO(),b!==r&&(Ke=C,b=Nu(b)),C=b,C===r&&(C=B,b=$O(),b!==r&&(Ke=C,b=Qh(b)),C=b,C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=vh(b)),C=b,C===r&&(C=B,b=sfe(),b!==r&&(Ke=C,b=oe(b)),C=b,C===r&&(C=B,b=nfe(),b!==r&&(Ke=C,b=So(b)),C=b)))),C}function efe(){var C,b,F;for(C=B,b=[],Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko));F!==r;)b.push(F),Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko));return b!==r&&(Ke=C,b=jn(b)),C=b,C}function tfe(){var C,b,F;if(C=B,b=[],F=_O(),F===r&&(Lu.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(vt))),F!==r)for(;F!==r;)b.push(F),F=_O(),F===r&&(Lu.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(vt)));else b=r;return b!==r&&(Ke=C,b=jn(b)),C=b,C}function _O(){var C,b,F;return C=B,t.substr(B,2)===Gl?(b=Gl,B+=2):(b=r,I===0&&ke(Gn)),b!==r&&(Ke=C,b=fs()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(xo.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(lt)),F!==r?(Ke=C,b=mn(F),C=b):(B=C,C=r)):(B=C,C=r)),C}function rfe(){var 
C,b,F;for(C=B,b=[],F=VO(),F===r&&(Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko)));F!==r;)b.push(F),F=VO(),F===r&&(Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko)));return b!==r&&(Ke=C,b=jn(b)),C=b,C}function VO(){var C,b,F;return C=B,t.substr(B,2)===v?(b=v,B+=2):(b=r,I===0&&ke(Tt)),b!==r&&(Ke=C,b=Tu()),C=b,C===r&&(C=B,t.substr(B,2)===Yl?(b=Yl,B+=2):(b=r,I===0&&ke(Sh)),b!==r&&(Ke=C,b=kh()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(xh.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Ph)),F!==r?(Ke=C,b=Dh(),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===G?(b=G,B+=2):(b=r,I===0&&ke(yt)),b!==r&&(Ke=C,b=DA()),C=b,C===r&&(C=B,t.substr(B,2)===$i?(b=$i,B+=2):(b=r,I===0&&ke(ql)),b!==r&&(Ke=C,b=$e()),C=b,C===r&&(C=B,t.substr(B,2)===wa?(b=wa,B+=2):(b=r,I===0&&ke(Ou)),b!==r&&(Ke=C,b=SE()),C=b,C===r&&(C=B,t.substr(B,2)===Rh?(b=Rh,B+=2):(b=r,I===0&&ke(kE)),b!==r&&(Ke=C,b=gr()),C=b,C===r&&(C=B,t.substr(B,2)===Yn?(b=Yn,B+=2):(b=r,I===0&&ke(Jl)),b!==r&&(Ke=C,b=Fh()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(Vs.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Ba)),F!==r?(Ke=C,b=mn(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=ife()))))))))),C}function ife(){var C,b,F,H,ue,De,Ct,bt,Zr,Ei,ds,AQ;return 
C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(F=nQ(),F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Oe?(b=Oe,B+=2):(b=r,I===0&&ke(Mu)),b!==r?(F=B,H=B,ue=nQ(),ue!==r?(De=Wn(),De!==r?(ue=[ue,De],H=ue):(B=H,H=r)):(B=H,H=r),H===r&&(H=nQ()),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Wl?(b=Wl,B+=2):(b=r,I===0&&ke(Xs)),b!==r?(F=B,H=B,ue=Wn(),ue!==r?(De=Wn(),De!==r?(Ct=Wn(),Ct!==r?(bt=Wn(),bt!==r?(ue=[ue,De,Ct,bt],H=ue):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===zl?(b=zl,B+=2):(b=r,I===0&&ke(RA)),b!==r?(F=B,H=B,ue=Wn(),ue!==r?(De=Wn(),De!==r?(Ct=Wn(),Ct!==r?(bt=Wn(),bt!==r?(Zr=Wn(),Zr!==r?(Ei=Wn(),Ei!==r?(ds=Wn(),ds!==r?(AQ=Wn(),AQ!==r?(ue=[ue,De,Ct,bt,Zr,Ei,ds,AQ],H=ue):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=Uu(F),C=b):(B=C,C=r)):(B=C,C=r)))),C}function nQ(){var C;return Ku.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(ba)),C}function Wn(){var C;return Qa.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(it)),C}function nfe(){var C,b,F,H,ue;if(C=B,b=[],F=B,t.charCodeAt(B)===92?(H=hs,B++):(H=r,I===0&&ke(pt)),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F===r&&(F=B,H=B,I++,ue=tM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r)),F!==r)for(;F!==r;)b.push(F),F=B,t.charCodeAt(B)===92?(H=hs,B++):(H=r,I===0&&ke(pt)),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F===r&&(F=B,H=B,I++,ue=tM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r));else b=r;return 
b!==r&&(Ke=C,b=jn(b)),C=b,C}function sQ(){var C,b,F,H,ue,De;if(C=B,t.charCodeAt(B)===45?(b=FA,B++):(b=r,I===0&&ke(_l)),b===r&&(t.charCodeAt(B)===43?(b=Zs,B++):(b=r,I===0&&ke(Vl))),b===r&&(b=null),b!==r){if(F=[],ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe)),H!==r)for(;H!==r;)F.push(H),ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe));else F=r;if(F!==r)if(t.charCodeAt(B)===46?(H=xE,B++):(H=r,I===0&&ke(Nh)),H!==r){if(ue=[],ve.test(t.charAt(B))?(De=t.charAt(B),B++):(De=r,I===0&&ke(pe)),De!==r)for(;De!==r;)ue.push(De),ve.test(t.charAt(B))?(De=t.charAt(B),B++):(De=r,I===0&&ke(pe));else ue=r;ue!==r?(Ke=C,b=Hu(b,F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;if(C===r){if(C=B,t.charCodeAt(B)===45?(b=FA,B++):(b=r,I===0&&ke(_l)),b===r&&(t.charCodeAt(B)===43?(b=Zs,B++):(b=r,I===0&&ke(Vl))),b===r&&(b=null),b!==r){if(F=[],ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe)),H!==r)for(;H!==r;)F.push(H),ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe));else F=r;F!==r?(Ke=C,b=Lh(b,F),C=b):(B=C,C=r)}else B=C,C=r;if(C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=PE(b)),C=b,C===r&&(C=B,b=Zl(),b!==r&&(Ke=C,b=Xl(b)),C=b,C===r)))if(C=B,t.charCodeAt(B)===40?(b=le,B++):(b=r,I===0&&ke(Ae)),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();if(F!==r)if(H=XO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(t.charCodeAt(B)===41?(De=T,B++):(De=r,I===0&&ke(L)),De!==r?(Ke=C,b=DE(H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r}return C}function oQ(){var C,b,F,H,ue,De,Ct,bt;if(C=B,b=sQ(),b!==r){for(F=[],H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===42?(De=ju,B++):(De=r,I===0&&ke(NA)),De===r&&(t.charCodeAt(B)===47?(De=Lr,B++):(De=r,I===0&&ke(RE))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=sQ(),bt!==r?(Ke=H,ue=$s(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else 
B=H,H=r;for(;H!==r;){for(F.push(H),H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===42?(De=ju,B++):(De=r,I===0&&ke(NA)),De===r&&(t.charCodeAt(B)===47?(De=Lr,B++):(De=r,I===0&&ke(RE))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=sQ(),bt!==r?(Ke=H,ue=$s(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r}F!==r?(Ke=C,b=eo(b,F),C=b):(B=C,C=r)}else B=C,C=r;return C}function XO(){var C,b,F,H,ue,De,Ct,bt;if(C=B,b=oQ(),b!==r){for(F=[],H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===43?(De=Zs,B++):(De=r,I===0&&ke(Vl)),De===r&&(t.charCodeAt(B)===45?(De=FA,B++):(De=r,I===0&&ke(_l))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=oQ(),bt!==r?(Ke=H,ue=Gu(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r;for(;H!==r;){for(F.push(H),H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===43?(De=Zs,B++):(De=r,I===0&&ke(Vl)),De===r&&(t.charCodeAt(B)===45?(De=FA,B++):(De=r,I===0&&ke(_l))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=oQ(),bt!==r?(Ke=H,ue=Gu(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r}F!==r?(Ke=C,b=eo(b,F),C=b):(B=C,C=r)}else B=C,C=r;return C}function ZO(){var C,b,F,H,ue,De;if(C=B,t.substr(B,3)===LA?(b=LA,B+=3):(b=r,I===0&&ke(R)),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();if(F!==r)if(H=XO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(t.substr(B,2)===q?(De=q,B+=2):(De=r,I===0&&ke(de)),De!==r?(Ke=C,b=He(H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;return C}function $O(){var C,b,F,H;return C=B,t.substr(B,2)===Te?(b=Te,B+=2):(b=r,I===0&&ke(Xe)),b!==r?(F=Gr(),F!==r?(t.charCodeAt(B)===41?(H=T,B++):(H=r,I===0&&ke(L)),H!==r?(Ke=C,b=Et(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function aQ(){var C,b,F,H,ue,De;return 
C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,2)===Jb?(H=Jb,B+=2):(H=r,I===0&&ke(xO)),H!==r?(ue=GO(),ue!==r?(t.charCodeAt(B)===125?(De=re,B++):(De=r,I===0&&ke(se)),De!==r?(Ke=C,b=PO(F,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,3)===Wb?(H=Wb,B+=3):(H=r,I===0&&ke(DO)),H!==r?(Ke=C,b=RO(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,2)===zb?(H=zb,B+=2):(H=r,I===0&&ke(FO)),H!==r?(ue=GO(),ue!==r?(t.charCodeAt(B)===125?(De=re,B++):(De=r,I===0&&ke(se)),De!==r?(Ke=C,b=NO(F,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,3)===_b?(H=_b,B+=3):(H=r,I===0&&ke(LO)),H!==r?(Ke=C,b=TO(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.charCodeAt(B)===125?(H=re,B++):(H=r,I===0&&ke(se)),H!==r?(Ke=C,b=Vb(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.charCodeAt(B)===36?(b=OO,B++):(b=r,I===0&&ke(MO)),b!==r?(F=Zl(),F!==r?(Ke=C,b=Vb(F),C=b):(B=C,C=r)):(B=C,C=r)))))),C}function sfe(){var C,b,F;return C=B,b=ofe(),b!==r?(Ke=B,F=UO(b),F?F=void 0:F=r,F!==r?(Ke=C,b=KO(b),C=b):(B=C,C=r)):(B=C,C=r),C}function ofe(){var C,b,F,H,ue;if(C=B,b=[],F=B,H=B,I++,ue=rM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F!==r)for(;F!==r;)b.push(F),F=B,H=B,I++,ue=rM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r);else b=r;return b!==r&&(Ke=C,b=jn(b)),C=b,C}function eM(){var 
C,b,F;if(C=B,b=[],Xb.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Zb)),F!==r)for(;F!==r;)b.push(F),Xb.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Zb));else b=r;return b!==r&&(Ke=C,b=$b()),C=b,C}function Zl(){var C,b,F;if(C=B,b=[],eQ.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(tQ)),F!==r)for(;F!==r;)b.push(F),eQ.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(tQ));else b=r;return b!==r&&(Ke=C,b=$b()),C=b,C}function tM(){var C;return HO.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(Yu)),C}function rM(){var C;return rQ.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(iQ)),C}function je(){var C,b;if(C=[],FE.test(t.charAt(B))?(b=t.charAt(B),B++):(b=r,I===0&&ke(NE)),b!==r)for(;b!==r;)C.push(b),FE.test(t.charAt(B))?(b=t.charAt(B),B++):(b=r,I===0&&ke(NE));else C=r;return C}if(D=n(),D!==r&&B===t.length)return D;throw D!==r&&B{"use strict";function ohe(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function oc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,oc)}ohe(oc,Error);oc.buildMessage=function(t,e){var r={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;gY&&(Y=S,U=[]),U.push(pe))}function se(pe,X){return new oc(pe,null,null,X)}function Qe(pe,X,be){return new oc(oc.buildMessage(pe,X),pe,X,be)}function he(){var pe,X,be,ce;return pe=S,X=Fe(),X!==r?(t.charCodeAt(S)===47?(be=s,S++):(be=r,J===0&&re(o)),be!==r?(ce=Fe(),ce!==r?(x=pe,X=a(X,ce),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=Fe(),X!==r&&(x=pe,X=l(X)),pe=X),pe}function Fe(){var pe,X,be,ce;return pe=S,X=Ue(),X!==r?(t.charCodeAt(S)===64?(be=c,S++):(be=r,J===0&&re(u)),be!==r?(ce=ve(),ce!==r?(x=pe,X=g(X,ce),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=Ue(),X!==r&&(x=pe,X=f(X)),pe=X),pe}function Ue(){var pe,X,be,ce,fe;return 
pe=S,t.charCodeAt(S)===64?(X=c,S++):(X=r,J===0&&re(u)),X!==r?(be=xe(),be!==r?(t.charCodeAt(S)===47?(ce=s,S++):(ce=r,J===0&&re(o)),ce!==r?(fe=xe(),fe!==r?(x=pe,X=h(),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=xe(),X!==r&&(x=pe,X=h()),pe=X),pe}function xe(){var pe,X,be;if(pe=S,X=[],p.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(m)),be!==r)for(;be!==r;)X.push(be),p.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(m));else X=r;return X!==r&&(x=pe,X=h()),pe=X,pe}function ve(){var pe,X,be;if(pe=S,X=[],y.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(Q)),be!==r)for(;be!==r;)X.push(be),y.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(Q));else X=r;return X!==r&&(x=pe,X=h()),pe=X,pe}if(W=n(),W!==r&&S===t.length)return W;throw W!==r&&S{"use strict";function E1(t){return typeof t=="undefined"||t===null}function Ahe(t){return typeof t=="object"&&t!==null}function lhe(t){return Array.isArray(t)?t:E1(t)?[]:[t]}function che(t,e){var r,i,n,s;if(e)for(s=Object.keys(e),r=0,i=s.length;r{"use strict";function ep(t,e){Error.call(this),this.name="YAMLException",this.reason=t,this.mark=e,this.message=(this.reason||"(unknown reason)")+(this.mark?" "+this.mark.toString():""),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=new Error().stack||""}ep.prototype=Object.create(Error.prototype);ep.prototype.constructor=ep;ep.prototype.toString=function(e){var r=this.name+": ";return r+=this.reason||"(unknown reason)",!e&&this.mark&&(r+=" "+this.mark.toString()),r};I1.exports=ep});var B1=w((DZe,y1)=>{"use strict";var w1=Ac();function HQ(t,e,r,i,n){this.name=t,this.buffer=e,this.position=r,this.line=i,this.column=n}HQ.prototype.getSnippet=function(e,r){var i,n,s,o,a;if(!this.buffer)return null;for(e=e||4,r=r||75,i="",n=this.position;n>0&&`\0\r
+\x85\u2028\u2029`.indexOf(this.buffer.charAt(n-1))===-1;)if(n-=1,this.position-n>r/2-1){i=" ... ",n+=5;break}for(s="",o=this.position;or/2-1){s=" ... ",o-=5;break}return a=this.buffer.slice(n,o),w1.repeat(" ",e)+i+a+s+`
+`+w1.repeat(" ",e+this.position-n+i.length)+"^"};HQ.prototype.toString=function(e){var r,i="";return this.name&&(i+='in "'+this.name+'" '),i+="at line "+(this.line+1)+", column "+(this.column+1),e||(r=this.getSnippet(),r&&(i+=`:
+`+r)),i};y1.exports=HQ});var li=w((RZe,b1)=>{"use strict";var Q1=Vu(),fhe=["kind","resolve","construct","instanceOf","predicate","represent","defaultStyle","styleAliases"],hhe=["scalar","sequence","mapping"];function phe(t){var e={};return t!==null&&Object.keys(t).forEach(function(r){t[r].forEach(function(i){e[String(i)]=r})}),e}function dhe(t,e){if(e=e||{},Object.keys(e).forEach(function(r){if(fhe.indexOf(r)===-1)throw new Q1('Unknown option "'+r+'" is met in definition of "'+t+'" YAML type.')}),this.tag=t,this.kind=e.kind||null,this.resolve=e.resolve||function(){return!0},this.construct=e.construct||function(r){return r},this.instanceOf=e.instanceOf||null,this.predicate=e.predicate||null,this.represent=e.represent||null,this.defaultStyle=e.defaultStyle||null,this.styleAliases=phe(e.styleAliases||null),hhe.indexOf(this.kind)===-1)throw new Q1('Unknown kind "'+this.kind+'" is specified for "'+t+'" YAML type.')}b1.exports=dhe});var lc=w((FZe,v1)=>{"use strict";var S1=Ac(),nI=Vu(),Che=li();function jQ(t,e,r){var i=[];return t.include.forEach(function(n){r=jQ(n,e,r)}),t[e].forEach(function(n){r.forEach(function(s,o){s.tag===n.tag&&s.kind===n.kind&&i.push(o)}),r.push(n)}),r.filter(function(n,s){return i.indexOf(s)===-1})}function mhe(){var t={scalar:{},sequence:{},mapping:{},fallback:{}},e,r;function i(n){t[n.kind][n.tag]=t.fallback[n.tag]=n}for(e=0,r=arguments.length;e{"use strict";var Ehe=li();k1.exports=new Ehe("tag:yaml.org,2002:str",{kind:"scalar",construct:function(t){return t!==null?t:""}})});var D1=w((LZe,P1)=>{"use strict";var Ihe=li();P1.exports=new Ihe("tag:yaml.org,2002:seq",{kind:"sequence",construct:function(t){return t!==null?t:[]}})});var F1=w((TZe,R1)=>{"use strict";var yhe=li();R1.exports=new yhe("tag:yaml.org,2002:map",{kind:"mapping",construct:function(t){return t!==null?t:{}}})});var sI=w((OZe,N1)=>{"use strict";var whe=lc();N1.exports=new whe({explicit:[x1(),D1(),F1()]})});var T1=w((MZe,L1)=>{"use strict";var Bhe=li();function 
bhe(t){if(t===null)return!0;var e=t.length;return e===1&&t==="~"||e===4&&(t==="null"||t==="Null"||t==="NULL")}function Qhe(){return null}function vhe(t){return t===null}L1.exports=new Bhe("tag:yaml.org,2002:null",{kind:"scalar",resolve:bhe,construct:Qhe,predicate:vhe,represent:{canonical:function(){return"~"},lowercase:function(){return"null"},uppercase:function(){return"NULL"},camelcase:function(){return"Null"}},defaultStyle:"lowercase"})});var M1=w((UZe,O1)=>{"use strict";var She=li();function khe(t){if(t===null)return!1;var e=t.length;return e===4&&(t==="true"||t==="True"||t==="TRUE")||e===5&&(t==="false"||t==="False"||t==="FALSE")}function xhe(t){return t==="true"||t==="True"||t==="TRUE"}function Phe(t){return Object.prototype.toString.call(t)==="[object Boolean]"}O1.exports=new She("tag:yaml.org,2002:bool",{kind:"scalar",resolve:khe,construct:xhe,predicate:Phe,represent:{lowercase:function(t){return t?"true":"false"},uppercase:function(t){return t?"TRUE":"FALSE"},camelcase:function(t){return t?"True":"False"}},defaultStyle:"lowercase"})});var K1=w((KZe,U1)=>{"use strict";var Dhe=Ac(),Rhe=li();function Fhe(t){return 48<=t&&t<=57||65<=t&&t<=70||97<=t&&t<=102}function Nhe(t){return 48<=t&&t<=55}function Lhe(t){return 48<=t&&t<=57}function The(t){if(t===null)return!1;var e=t.length,r=0,i=!1,n;if(!e)return!1;if(n=t[r],(n==="-"||n==="+")&&(n=t[++r]),n==="0"){if(r+1===e)return!0;if(n=t[++r],n==="b"){for(r++;r=0?"0b"+t.toString(2):"-0b"+t.toString(2).slice(1)},octal:function(t){return t>=0?"0"+t.toString(8):"-0"+t.toString(8).slice(1)},decimal:function(t){return t.toString(10)},hexadecimal:function(t){return t>=0?"0x"+t.toString(16).toUpperCase():"-0x"+t.toString(16).toUpperCase().slice(1)}},defaultStyle:"decimal",styleAliases:{binary:[2,"bin"],octal:[8,"oct"],decimal:[10,"dec"],hexadecimal:[16,"hex"]}})});var G1=w((HZe,H1)=>{"use strict";var j1=Ac(),Uhe=li(),Khe=new 
RegExp("^(?:[-+]?(?:0|[1-9][0-9_]*)(?:\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*|[-+]?\\.(?:inf|Inf|INF)|\\.(?:nan|NaN|NAN))$");function Hhe(t){return!(t===null||!Khe.test(t)||t[t.length-1]==="_")}function jhe(t){var e,r,i,n;return e=t.replace(/_/g,"").toLowerCase(),r=e[0]==="-"?-1:1,n=[],"+-".indexOf(e[0])>=0&&(e=e.slice(1)),e===".inf"?r===1?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:e===".nan"?NaN:e.indexOf(":")>=0?(e.split(":").forEach(function(s){n.unshift(parseFloat(s,10))}),e=0,i=1,n.forEach(function(s){e+=s*i,i*=60}),r*e):r*parseFloat(e,10)}var Ghe=/^[-+]?[0-9]+e/;function Yhe(t,e){var r;if(isNaN(t))switch(e){case"lowercase":return".nan";case"uppercase":return".NAN";case"camelcase":return".NaN"}else if(Number.POSITIVE_INFINITY===t)switch(e){case"lowercase":return".inf";case"uppercase":return".INF";case"camelcase":return".Inf"}else if(Number.NEGATIVE_INFINITY===t)switch(e){case"lowercase":return"-.inf";case"uppercase":return"-.INF";case"camelcase":return"-.Inf"}else if(j1.isNegativeZero(t))return"-0.0";return r=t.toString(10),Ghe.test(r)?r.replace("e",".e"):r}function qhe(t){return Object.prototype.toString.call(t)==="[object Number]"&&(t%1!=0||j1.isNegativeZero(t))}H1.exports=new Uhe("tag:yaml.org,2002:float",{kind:"scalar",resolve:Hhe,construct:jhe,predicate:qhe,represent:Yhe,defaultStyle:"lowercase"})});var GQ=w((jZe,Y1)=>{"use strict";var Jhe=lc();Y1.exports=new Jhe({include:[sI()],implicit:[T1(),M1(),K1(),G1()]})});var YQ=w((GZe,q1)=>{"use strict";var Whe=lc();q1.exports=new Whe({include:[GQ()]})});var _1=w((YZe,J1)=>{"use strict";var zhe=li(),W1=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$"),z1=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ \\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$");function _he(t){return t===null?!1:W1.exec(t)!==null||z1.exec(t)!==null}function 
Vhe(t){var e,r,i,n,s,o,a,l=0,c=null,u,g,f;if(e=W1.exec(t),e===null&&(e=z1.exec(t)),e===null)throw new Error("Date resolve error");if(r=+e[1],i=+e[2]-1,n=+e[3],!e[4])return new Date(Date.UTC(r,i,n));if(s=+e[4],o=+e[5],a=+e[6],e[7]){for(l=e[7].slice(0,3);l.length<3;)l+="0";l=+l}return e[9]&&(u=+e[10],g=+(e[11]||0),c=(u*60+g)*6e4,e[9]==="-"&&(c=-c)),f=new Date(Date.UTC(r,i,n,s,o,a,l)),c&&f.setTime(f.getTime()-c),f}function Xhe(t){return t.toISOString()}J1.exports=new zhe("tag:yaml.org,2002:timestamp",{kind:"scalar",resolve:_he,construct:Vhe,instanceOf:Date,represent:Xhe})});var X1=w((qZe,V1)=>{"use strict";var Zhe=li();function $he(t){return t==="<<"||t===null}V1.exports=new Zhe("tag:yaml.org,2002:merge",{kind:"scalar",resolve:$he})});var eU=w((JZe,Z1)=>{"use strict";var cc;try{$1=require,cc=$1("buffer").Buffer}catch(t){}var $1,epe=li(),qQ=`ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=
+\r`;function tpe(t){if(t===null)return!1;var e,r,i=0,n=t.length,s=qQ;for(r=0;r64)){if(e<0)return!1;i+=6}return i%8==0}function rpe(t){var e,r,i=t.replace(/[\r\n=]/g,""),n=i.length,s=qQ,o=0,a=[];for(e=0;e>16&255),a.push(o>>8&255),a.push(o&255)),o=o<<6|s.indexOf(i.charAt(e));return r=n%4*6,r===0?(a.push(o>>16&255),a.push(o>>8&255),a.push(o&255)):r===18?(a.push(o>>10&255),a.push(o>>2&255)):r===12&&a.push(o>>4&255),cc?cc.from?cc.from(a):new cc(a):a}function ipe(t){var e="",r=0,i,n,s=t.length,o=qQ;for(i=0;i>18&63],e+=o[r>>12&63],e+=o[r>>6&63],e+=o[r&63]),r=(r<<8)+t[i];return n=s%3,n===0?(e+=o[r>>18&63],e+=o[r>>12&63],e+=o[r>>6&63],e+=o[r&63]):n===2?(e+=o[r>>10&63],e+=o[r>>4&63],e+=o[r<<2&63],e+=o[64]):n===1&&(e+=o[r>>2&63],e+=o[r<<4&63],e+=o[64],e+=o[64]),e}function npe(t){return cc&&cc.isBuffer(t)}Z1.exports=new epe("tag:yaml.org,2002:binary",{kind:"scalar",resolve:tpe,construct:rpe,predicate:npe,represent:ipe})});var rU=w((WZe,tU)=>{"use strict";var spe=li(),ope=Object.prototype.hasOwnProperty,ape=Object.prototype.toString;function Ape(t){if(t===null)return!0;var e=[],r,i,n,s,o,a=t;for(r=0,i=a.length;r{"use strict";var cpe=li(),upe=Object.prototype.toString;function gpe(t){if(t===null)return!0;var e,r,i,n,s,o=t;for(s=new Array(o.length),e=0,r=o.length;e{"use strict";var hpe=li(),ppe=Object.prototype.hasOwnProperty;function dpe(t){if(t===null)return!0;var e,r=t;for(e in r)if(ppe.call(r,e)&&r[e]!==null)return!1;return!0}function Cpe(t){return t!==null?t:{}}sU.exports=new hpe("tag:yaml.org,2002:set",{kind:"mapping",resolve:dpe,construct:Cpe})});var Zu=w((VZe,aU)=>{"use strict";var mpe=lc();aU.exports=new mpe({include:[YQ()],implicit:[_1(),X1()],explicit:[eU(),rU(),nU(),oU()]})});var lU=w((XZe,AU)=>{"use strict";var Epe=li();function Ipe(){return!0}function ype(){}function wpe(){return""}function Bpe(t){return typeof t=="undefined"}AU.exports=new Epe("tag:yaml.org,2002:js/undefined",{kind:"scalar",resolve:Ipe,construct:ype,predicate:Bpe,represent:wpe})});var 
uU=w((ZZe,cU)=>{"use strict";var bpe=li();function Qpe(t){if(t===null||t.length===0)return!1;var e=t,r=/\/([gim]*)$/.exec(t),i="";return!(e[0]==="/"&&(r&&(i=r[1]),i.length>3||e[e.length-i.length-1]!=="/"))}function vpe(t){var e=t,r=/\/([gim]*)$/.exec(t),i="";return e[0]==="/"&&(r&&(i=r[1]),e=e.slice(1,e.length-i.length-1)),new RegExp(e,i)}function Spe(t){var e="/"+t.source+"/";return t.global&&(e+="g"),t.multiline&&(e+="m"),t.ignoreCase&&(e+="i"),e}function kpe(t){return Object.prototype.toString.call(t)==="[object RegExp]"}cU.exports=new bpe("tag:yaml.org,2002:js/regexp",{kind:"scalar",resolve:Qpe,construct:vpe,predicate:kpe,represent:Spe})});var hU=w(($Ze,gU)=>{"use strict";var oI;try{fU=require,oI=fU("esprima")}catch(t){typeof window!="undefined"&&(oI=window.esprima)}var fU,xpe=li();function Ppe(t){if(t===null)return!1;try{var e="("+t+")",r=oI.parse(e,{range:!0});return!(r.type!=="Program"||r.body.length!==1||r.body[0].type!=="ExpressionStatement"||r.body[0].expression.type!=="ArrowFunctionExpression"&&r.body[0].expression.type!=="FunctionExpression")}catch(i){return!1}}function Dpe(t){var e="("+t+")",r=oI.parse(e,{range:!0}),i=[],n;if(r.type!=="Program"||r.body.length!==1||r.body[0].type!=="ExpressionStatement"||r.body[0].expression.type!=="ArrowFunctionExpression"&&r.body[0].expression.type!=="FunctionExpression")throw new Error("Failed to resolve function");return r.body[0].expression.params.forEach(function(s){i.push(s.name)}),n=r.body[0].expression.body.range,r.body[0].expression.body.type==="BlockStatement"?new Function(i,e.slice(n[0]+1,n[1]-1)):new Function(i,"return "+e.slice(n[0],n[1]))}function Rpe(t){return t.toString()}function Fpe(t){return Object.prototype.toString.call(t)==="[object Function]"}gU.exports=new xpe("tag:yaml.org,2002:js/function",{kind:"scalar",resolve:Ppe,construct:Dpe,predicate:Fpe,represent:Rpe})});var tp=w((e$e,pU)=>{"use strict";var dU=lc();pU.exports=dU.DEFAULT=new dU({include:[Zu()],explicit:[lU(),uU(),hU()]})});var 
LU=w((t$e,rp)=>{"use strict";var Fa=Ac(),CU=Vu(),Npe=B1(),mU=Zu(),Lpe=tp(),HA=Object.prototype.hasOwnProperty,aI=1,EU=2,IU=3,AI=4,JQ=1,Tpe=2,yU=3,Ope=/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F\uFFFE\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/,Mpe=/[\x85\u2028\u2029]/,Upe=/[,\[\]\{\}]/,wU=/^(?:!|!!|![a-z\-]+!)$/i,BU=/^(?:!|[^,\[\]\{\}])(?:%[0-9a-f]{2}|[0-9a-z\-#;\/\?:@&=\+\$,_\.!~\*'\(\)\[\]])*$/i;function bU(t){return Object.prototype.toString.call(t)}function Ro(t){return t===10||t===13}function uc(t){return t===9||t===32}function yn(t){return t===9||t===32||t===10||t===13}function $u(t){return t===44||t===91||t===93||t===123||t===125}function Kpe(t){var e;return 48<=t&&t<=57?t-48:(e=t|32,97<=e&&e<=102?e-97+10:-1)}function Hpe(t){return t===120?2:t===117?4:t===85?8:0}function jpe(t){return 48<=t&&t<=57?t-48:-1}function QU(t){return t===48?"\0":t===97?"\x07":t===98?"\b":t===116||t===9?" ":t===110?`
+`:t===118?"\v":t===102?"\f":t===114?"\r":t===101?"":t===32?" ":t===34?'"':t===47?"/":t===92?"\\":t===78?"\x85":t===95?"\xA0":t===76?"\u2028":t===80?"\u2029":""}function Gpe(t){return t<=65535?String.fromCharCode(t):String.fromCharCode((t-65536>>10)+55296,(t-65536&1023)+56320)}var vU=new Array(256),SU=new Array(256);for(var eg=0;eg<256;eg++)vU[eg]=QU(eg)?1:0,SU[eg]=QU(eg);function Ype(t,e){this.input=t,this.filename=e.filename||null,this.schema=e.schema||Lpe,this.onWarning=e.onWarning||null,this.legacy=e.legacy||!1,this.json=e.json||!1,this.listener=e.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=t.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.documents=[]}function kU(t,e){return new CU(e,new Npe(t.filename,t.input,t.position,t.line,t.position-t.lineStart))}function dt(t,e){throw kU(t,e)}function lI(t,e){t.onWarning&&t.onWarning.call(null,kU(t,e))}var xU={YAML:function(e,r,i){var n,s,o;e.version!==null&&dt(e,"duplication of %YAML directive"),i.length!==1&&dt(e,"YAML directive accepts exactly one argument"),n=/^([0-9]+)\.([0-9]+)$/.exec(i[0]),n===null&&dt(e,"ill-formed argument of the YAML directive"),s=parseInt(n[1],10),o=parseInt(n[2],10),s!==1&&dt(e,"unacceptable YAML version of the document"),e.version=i[0],e.checkLineBreaks=o<2,o!==1&&o!==2&&lI(e,"unsupported YAML version of the document")},TAG:function(e,r,i){var n,s;i.length!==2&&dt(e,"TAG directive accepts exactly two arguments"),n=i[0],s=i[1],wU.test(n)||dt(e,"ill-formed tag handle (first argument) of the TAG directive"),HA.call(e.tagMap,n)&&dt(e,'there is a previously declared suffix for "'+n+'" tag handle'),BU.test(s)||dt(e,"ill-formed tag prefix (second argument) of the TAG directive"),e.tagMap[n]=s}};function jA(t,e,r,i){var n,s,o,a;if(e1&&(t.result+=Fa.repeat(`
+`,e-1))}function qpe(t,e,r){var i,n,s,o,a,l,c,u,g=t.kind,f=t.result,h;if(h=t.input.charCodeAt(t.position),yn(h)||$u(h)||h===35||h===38||h===42||h===33||h===124||h===62||h===39||h===34||h===37||h===64||h===96||(h===63||h===45)&&(n=t.input.charCodeAt(t.position+1),yn(n)||r&&$u(n)))return!1;for(t.kind="scalar",t.result="",s=o=t.position,a=!1;h!==0;){if(h===58){if(n=t.input.charCodeAt(t.position+1),yn(n)||r&&$u(n))break}else if(h===35){if(i=t.input.charCodeAt(t.position-1),yn(i))break}else{if(t.position===t.lineStart&&cI(t)||r&&$u(h))break;if(Ro(h))if(l=t.line,c=t.lineStart,u=t.lineIndent,$r(t,!1,-1),t.lineIndent>=e){a=!0,h=t.input.charCodeAt(t.position);continue}else{t.position=o,t.line=l,t.lineStart=c,t.lineIndent=u;break}}a&&(jA(t,s,o,!1),zQ(t,t.line-l),s=o=t.position,a=!1),uc(h)||(o=t.position+1),h=t.input.charCodeAt(++t.position)}return jA(t,s,o,!1),t.result?!0:(t.kind=g,t.result=f,!1)}function Jpe(t,e){var r,i,n;if(r=t.input.charCodeAt(t.position),r!==39)return!1;for(t.kind="scalar",t.result="",t.position++,i=n=t.position;(r=t.input.charCodeAt(t.position))!==0;)if(r===39)if(jA(t,i,t.position,!0),r=t.input.charCodeAt(++t.position),r===39)i=t.position,t.position++,n=t.position;else return!0;else Ro(r)?(jA(t,i,n,!0),zQ(t,$r(t,!1,e)),i=n=t.position):t.position===t.lineStart&&cI(t)?dt(t,"unexpected end of the document within a single quoted scalar"):(t.position++,n=t.position);dt(t,"unexpected end of the stream within a single quoted scalar")}function Wpe(t,e){var r,i,n,s,o,a;if(a=t.input.charCodeAt(t.position),a!==34)return!1;for(t.kind="scalar",t.result="",t.position++,r=i=t.position;(a=t.input.charCodeAt(t.position))!==0;){if(a===34)return jA(t,r,t.position,!0),t.position++,!0;if(a===92){if(jA(t,r,t.position,!0),a=t.input.charCodeAt(++t.position),Ro(a))$r(t,!1,e);else if(a<256&&vU[a])t.result+=SU[a],t.position++;else if((o=Hpe(a))>0){for(n=o,s=0;n>0;n--)a=t.input.charCodeAt(++t.position),(o=Kpe(a))>=0?s=(s<<4)+o:dt(t,"expected hexadecimal 
character");t.result+=Gpe(s),t.position++}else dt(t,"unknown escape sequence");r=i=t.position}else Ro(a)?(jA(t,r,i,!0),zQ(t,$r(t,!1,e)),r=i=t.position):t.position===t.lineStart&&cI(t)?dt(t,"unexpected end of the document within a double quoted scalar"):(t.position++,i=t.position)}dt(t,"unexpected end of the stream within a double quoted scalar")}function zpe(t,e){var r=!0,i,n=t.tag,s,o=t.anchor,a,l,c,u,g,f={},h,p,m,y;if(y=t.input.charCodeAt(t.position),y===91)l=93,g=!1,s=[];else if(y===123)l=125,g=!0,s={};else return!1;for(t.anchor!==null&&(t.anchorMap[t.anchor]=s),y=t.input.charCodeAt(++t.position);y!==0;){if($r(t,!0,e),y=t.input.charCodeAt(t.position),y===l)return t.position++,t.tag=n,t.anchor=o,t.kind=g?"mapping":"sequence",t.result=s,!0;r||dt(t,"missed comma between flow collection entries"),p=h=m=null,c=u=!1,y===63&&(a=t.input.charCodeAt(t.position+1),yn(a)&&(c=u=!0,t.position++,$r(t,!0,e))),i=t.line,rg(t,e,aI,!1,!0),p=t.tag,h=t.result,$r(t,!0,e),y=t.input.charCodeAt(t.position),(u||t.line===i)&&y===58&&(c=!0,y=t.input.charCodeAt(++t.position),$r(t,!0,e),rg(t,e,aI,!1,!0),m=t.result),g?tg(t,s,f,p,h,m):c?s.push(tg(t,null,f,p,h,m)):s.push(h),$r(t,!0,e),y=t.input.charCodeAt(t.position),y===44?(r=!0,y=t.input.charCodeAt(++t.position)):r=!1}dt(t,"unexpected end of the stream within a flow collection")}function _pe(t,e){var r,i,n=JQ,s=!1,o=!1,a=e,l=0,c=!1,u,g;if(g=t.input.charCodeAt(t.position),g===124)i=!1;else if(g===62)i=!0;else return!1;for(t.kind="scalar",t.result="";g!==0;)if(g=t.input.charCodeAt(++t.position),g===43||g===45)JQ===n?n=g===43?yU:Tpe:dt(t,"repeat of a chomping mode identifier");else if((u=jpe(g))>=0)u===0?dt(t,"bad explicit indentation width of a block scalar; it cannot be less than one"):o?dt(t,"repeat of an indentation width identifier"):(a=e+u-1,o=!0);else break;if(uc(g)){do g=t.input.charCodeAt(++t.position);while(uc(g));if(g===35)do 
g=t.input.charCodeAt(++t.position);while(!Ro(g)&&g!==0)}for(;g!==0;){for(WQ(t),t.lineIndent=0,g=t.input.charCodeAt(t.position);(!o||t.lineIndenta&&(a=t.lineIndent),Ro(g)){l++;continue}if(t.lineIndent e)&&l!==0)dt(t,"bad indentation of a sequence entry");else if(t.lineIndente)&&(rg(t,e,AI,!0,n)&&(p?f=t.result:h=t.result),p||(tg(t,c,u,g,f,h,s,o),g=f=h=null),$r(t,!0,-1),y=t.input.charCodeAt(t.position)),t.lineIndent>e&&y!==0)dt(t,"bad indentation of a mapping entry");else if(t.lineIndente?l=1:t.lineIndent===e?l=0:t.lineIndente?l=1:t.lineIndent===e?l=0:t.lineIndent tag; it should be "scalar", not "'+t.kind+'"'),g=0,f=t.implicitTypes.length;g tag; it should be "'+h.kind+'", not "'+t.kind+'"'),h.resolve(t.result)?(t.result=h.construct(t.result),t.anchor!==null&&(t.anchorMap[t.anchor]=t.result)):dt(t,"cannot resolve a node with !<"+t.tag+"> explicit tag")):dt(t,"unknown tag !<"+t.tag+">");return t.listener!==null&&t.listener("close",t),t.tag!==null||t.anchor!==null||u}function ede(t){var e=t.position,r,i,n,s=!1,o;for(t.version=null,t.checkLineBreaks=t.legacy,t.tagMap={},t.anchorMap={};(o=t.input.charCodeAt(t.position))!==0&&($r(t,!0,-1),o=t.input.charCodeAt(t.position),!(t.lineIndent>0||o!==37));){for(s=!0,o=t.input.charCodeAt(++t.position),r=t.position;o!==0&&!yn(o);)o=t.input.charCodeAt(++t.position);for(i=t.input.slice(r,t.position),n=[],i.length<1&&dt(t,"directive name must not be less than one character in length");o!==0;){for(;uc(o);)o=t.input.charCodeAt(++t.position);if(o===35){do o=t.input.charCodeAt(++t.position);while(o!==0&&!Ro(o));break}if(Ro(o))break;for(r=t.position;o!==0&&!yn(o);)o=t.input.charCodeAt(++t.position);n.push(t.input.slice(r,t.position))}o!==0&&WQ(t),HA.call(xU,i)?xU[i](t,i,n):lI(t,'unknown document directive "'+i+'"')}if($r(t,!0,-1),t.lineIndent===0&&t.input.charCodeAt(t.position)===45&&t.input.charCodeAt(t.position+1)===45&&t.input.charCodeAt(t.position+2)===45?(t.position+=3,$r(t,!0,-1)):s&&dt(t,"directives end mark is 
expected"),rg(t,t.lineIndent-1,AI,!1,!0),$r(t,!0,-1),t.checkLineBreaks&&Mpe.test(t.input.slice(e,t.position))&&lI(t,"non-ASCII line breaks are interpreted as content"),t.documents.push(t.result),t.position===t.lineStart&&cI(t)){t.input.charCodeAt(t.position)===46&&(t.position+=3,$r(t,!0,-1));return}if(t.position{"use strict";var ip=Ac(),np=Vu(),ide=tp(),nde=Zu(),TU=Object.prototype.toString,OU=Object.prototype.hasOwnProperty,sde=9,sp=10,ode=13,ade=32,Ade=33,lde=34,MU=35,cde=37,ude=38,gde=39,fde=42,UU=44,hde=45,KU=58,pde=61,dde=62,Cde=63,mde=64,HU=91,jU=93,Ede=96,GU=123,Ide=124,YU=125,Ui={};Ui[0]="\\0";Ui[7]="\\a";Ui[8]="\\b";Ui[9]="\\t";Ui[10]="\\n";Ui[11]="\\v";Ui[12]="\\f";Ui[13]="\\r";Ui[27]="\\e";Ui[34]='\\"';Ui[92]="\\\\";Ui[133]="\\N";Ui[160]="\\_";Ui[8232]="\\L";Ui[8233]="\\P";var yde=["y","Y","yes","Yes","YES","on","On","ON","n","N","no","No","NO","off","Off","OFF"];function wde(t,e){var r,i,n,s,o,a,l;if(e===null)return{};for(r={},i=Object.keys(e),n=0,s=i.length;n0?t.charCodeAt(s-1):null,f=f&&WU(o,a)}else{for(s=0;si&&t[g+1]!==" ",g=s);else if(!ig(o))return uI;a=s>0?t.charCodeAt(s-1):null,f=f&&WU(o,a)}c=c||u&&s-g-1>i&&t[g+1]!==" "}return!l&&!c?f&&!n(t)?_U:VU:r>9&&zU(t)?uI:c?ZU:XU}function Pde(t,e,r,i){t.dump=function(){if(e.length===0)return"''";if(!t.noCompatMode&&yde.indexOf(e)!==-1)return"'"+e+"'";var n=t.indent*Math.max(1,r),s=t.lineWidth===-1?-1:Math.max(Math.min(t.lineWidth,40),t.lineWidth-n),o=i||t.flowLevel>-1&&r>=t.flowLevel;function a(l){return bde(t,l)}switch(Sde(e,o,t.indent,s,a)){case _U:return e;case VU:return"'"+e.replace(/'/g,"''")+"'";case XU:return"|"+$U(e,t.indent)+eK(JU(e,n));case ZU:return">"+$U(e,t.indent)+eK(JU(kde(e,s),n));case uI:return'"'+xde(e,s)+'"';default:throw new np("impossible error: invalid scalar style")}}()}function $U(t,e){var r=zU(t)?String(e):"",i=t[t.length-1]===`
+`,n=i&&(t[t.length-2]===`
+`||t===`
+`),s=n?"+":i?"":"-";return r+s+`
+`}function eK(t){return t[t.length-1]===`
+`?t.slice(0,-1):t}function kde(t,e){for(var r=/(\n+)([^\n]*)/g,i=function(){var c=t.indexOf(`
+`);return c=c!==-1?c:t.length,r.lastIndex=c,tK(t.slice(0,c),e)}(),n=t[0]===`
+`||t[0]===" ",s,o;o=r.exec(t);){var a=o[1],l=o[2];s=l[0]===" ",i+=a+(!n&&!s&&l!==""?`
+`:"")+tK(l,e),n=s}return i}function tK(t,e){if(t===""||t[0]===" ")return t;for(var r=/ [^ ]/g,i,n=0,s,o=0,a=0,l="";i=r.exec(t);)a=i.index,a-n>e&&(s=o>n?o:a,l+=`
+`+t.slice(n,s),n=s+1),o=a;return l+=`
+`,t.length-n>e&&o>n?l+=t.slice(n,o)+`
+`+t.slice(o+1):l+=t.slice(n),l.slice(1)}function xde(t){for(var e="",r,i,n,s=0;s=55296&&r<=56319&&(i=t.charCodeAt(s+1),i>=56320&&i<=57343)){e+=qU((r-55296)*1024+i-56320+65536),s++;continue}n=Ui[r],e+=!n&&ig(r)?t[s]:n||qU(r)}return e}function Dde(t,e,r){var i="",n=t.tag,s,o;for(s=0,o=r.length;s1024&&(u+="? "),u+=t.dump+(t.condenseFlow?'"':"")+":"+(t.condenseFlow?"":" "),!!gc(t,e,c,!1,!1)&&(u+=t.dump,i+=u));t.tag=n,t.dump="{"+i+"}"}function Nde(t,e,r,i){var n="",s=t.tag,o=Object.keys(r),a,l,c,u,g,f;if(t.sortKeys===!0)o.sort();else if(typeof t.sortKeys=="function")o.sort(t.sortKeys);else if(t.sortKeys)throw new np("sortKeys must be a boolean or a function");for(a=0,l=o.length;a1024,g&&(t.dump&&sp===t.dump.charCodeAt(0)?f+="?":f+="? "),f+=t.dump,g&&(f+=VQ(t,e)),!!gc(t,e+1,u,!0,g)&&(t.dump&&sp===t.dump.charCodeAt(0)?f+=":":f+=": ",f+=t.dump,n+=f));t.tag=s,t.dump=n||"{}"}function rK(t,e,r){var i,n,s,o,a,l;for(n=r?t.explicitTypes:t.implicitTypes,s=0,o=n.length;s tag resolver accepts not "'+l+'" style');t.dump=i}return!0}return!1}function gc(t,e,r,i,n,s){t.tag=null,t.dump=r,rK(t,r,!1)||rK(t,r,!0);var o=TU.call(t.dump);i&&(i=t.flowLevel<0||t.flowLevel>e);var a=o==="[object Object]"||o==="[object Array]",l,c;if(a&&(l=t.duplicates.indexOf(r),c=l!==-1),(t.tag!==null&&t.tag!=="?"||c||t.indent!==2&&e>0)&&(n=!1),c&&t.usedDuplicates[l])t.dump="*ref_"+l;else{if(a&&c&&!t.usedDuplicates[l]&&(t.usedDuplicates[l]=!0),o==="[object Object]")i&&Object.keys(t.dump).length!==0?(Nde(t,e,t.dump,n),c&&(t.dump="&ref_"+l+t.dump)):(Fde(t,e,t.dump),c&&(t.dump="&ref_"+l+" "+t.dump));else if(o==="[object Array]"){var u=t.noArrayIndent&&e>0?e-1:e;i&&t.dump.length!==0?(Rde(t,u,t.dump,n),c&&(t.dump="&ref_"+l+t.dump)):(Dde(t,u,t.dump),c&&(t.dump="&ref_"+l+" "+t.dump))}else if(o==="[object String]")t.tag!=="?"&&Pde(t,t.dump,e,s);else{if(t.skipInvalid)return!1;throw new np("unacceptable kind of an object to dump "+o)}t.tag!==null&&t.tag!=="?"&&(t.dump="!<"+t.tag+"> "+t.dump)}return!0}function 
Lde(t,e){var r=[],i=[],n,s;for(ZQ(t,r,i),n=0,s=i.length;n{"use strict";var gI=LU(),sK=nK();function fI(t){return function(){throw new Error("Function "+t+" is deprecated and cannot be used.")}}Or.exports.Type=li();Or.exports.Schema=lc();Or.exports.FAILSAFE_SCHEMA=sI();Or.exports.JSON_SCHEMA=GQ();Or.exports.CORE_SCHEMA=YQ();Or.exports.DEFAULT_SAFE_SCHEMA=Zu();Or.exports.DEFAULT_FULL_SCHEMA=tp();Or.exports.load=gI.load;Or.exports.loadAll=gI.loadAll;Or.exports.safeLoad=gI.safeLoad;Or.exports.safeLoadAll=gI.safeLoadAll;Or.exports.dump=sK.dump;Or.exports.safeDump=sK.safeDump;Or.exports.YAMLException=Vu();Or.exports.MINIMAL_SCHEMA=sI();Or.exports.SAFE_SCHEMA=Zu();Or.exports.DEFAULT_SCHEMA=tp();Or.exports.scan=fI("scan");Or.exports.parse=fI("parse");Or.exports.compose=fI("compose");Or.exports.addConstructor=fI("addConstructor")});var AK=w((n$e,aK)=>{"use strict";var Ode=oK();aK.exports=Ode});var cK=w((s$e,lK)=>{"use strict";function Mde(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function fc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,fc)}Mde(fc,Error);fc.buildMessage=function(t,e){var r={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;g({[He]:de})))},Y=function(R){return R},U=function(R){return R},J=Vs("correct indentation"),W=" ",ee=gr(" ",!1),Z=function(R){return R.length===LA*Gu},A=function(R){return R.length===(LA+1)*Gu},ne=function(){return LA++,!0},le=function(){return LA--,!0},Ae=function(){return Ou()},T=Vs("pseudostring"),L=/^[^\r\n\t ?:,\][{}#&*!|>'"%@`\-]/,Ee=Yn(["\r",`
+`," "," ","?",":",",","]","[","{","}","#","&","*","!","|",">","'",'"',"%","@","`","-"],!0,!1),we=/^[^\r\n\t ,\][{}:#"']/,qe=Yn(["\r",`
+`," "," ",",","]","[","{","}",":","#",'"',"'"],!0,!1),re=function(){return Ou().replace(/^ *| *$/g,"")},se="--",Qe=gr("--",!1),he=/^[a-zA-Z\/0-9]/,Fe=Yn([["a","z"],["A","Z"],"/",["0","9"]],!1,!1),Ue=/^[^\r\n\t :,]/,xe=Yn(["\r",`
+`," "," ",":",","],!0,!1),ve="null",pe=gr("null",!1),X=function(){return null},be="true",ce=gr("true",!1),fe=function(){return!0},gt="false",Ht=gr("false",!1),Mt=function(){return!1},mi=Vs("string"),jt='"',Qr=gr('"',!1),Ti=function(){return""},_s=function(R){return R},Un=function(R){return R.join("")},Kn=/^[^"\\\0-\x1F\x7F]/,vr=Yn(['"',"\\",["\0",""],"\x7F"],!0,!1),Hn='\\"',us=gr('\\"',!1),Ia=function(){return'"'},SA="\\\\",Du=gr("\\\\",!1),gs=function(){return"\\"},kA="\\/",ya=gr("\\/",!1),Ru=function(){return"/"},xA="\\b",PA=gr("\\b",!1),Sr=function(){return"\b"},jl="\\f",Fu=gr("\\f",!1),So=function(){return"\f"},Nu="\\n",Qh=gr("\\n",!1),vh=function(){return`
+`},oe="\\r",Oi=gr("\\r",!1),ko=function(){return"\r"},jn="\\t",Lu=gr("\\t",!1),vt=function(){return" "},Gl="\\u",Gn=gr("\\u",!1),fs=function(R,q,de,He){return String.fromCharCode(parseInt(`0x${R}${q}${de}${He}`))},hs=/^[0-9a-fA-F]/,pt=Yn([["0","9"],["a","f"],["A","F"]],!1,!1),xo=Vs("blank space"),lt=/^[ \t]/,mn=Yn([" "," "],!1,!1),v=Vs("white space"),Tt=/^[ \t\n\r]/,Tu=Yn([" "," ",`
+`,"\r"],!1,!1),Yl=`\r
+`,Sh=gr(`\r
+`,!1),kh=`
+`,xh=gr(`
+`,!1),Ph="\r",Dh=gr("\r",!1),G=0,yt=0,DA=[{line:1,column:1}],$i=0,ql=[],$e=0,wa;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function Ou(){return t.substring(yt,G)}function SE(){return En(yt,G)}function Rh(R,q){throw q=q!==void 0?q:En(yt,G),Wl([Vs(R)],t.substring(yt,G),q)}function kE(R,q){throw q=q!==void 0?q:En(yt,G),Mu(R,q)}function gr(R,q){return{type:"literal",text:R,ignoreCase:q}}function Yn(R,q,de){return{type:"class",parts:R,inverted:q,ignoreCase:de}}function Jl(){return{type:"any"}}function Fh(){return{type:"end"}}function Vs(R){return{type:"other",description:R}}function Ba(R){var q=DA[R],de;if(q)return q;for(de=R-1;!DA[de];)de--;for(q=DA[de],q={line:q.line,column:q.column};de$i&&($i=G,ql=[]),ql.push(R))}function Mu(R,q){return new fc(R,null,null,q)}function Wl(R,q,de){return new fc(fc.buildMessage(R,q),R,q,de)}function Xs(){var R;return R=Uu(),R}function zl(){var R,q,de;for(R=G,q=[],de=RA();de!==r;)q.push(de),de=RA();return q!==r&&(yt=R,q=s(q)),R=q,R}function RA(){var R,q,de,He,Te;return R=G,q=Qa(),q!==r?(t.charCodeAt(G)===45?(de=o,G++):(de=r,$e===0&&Oe(a)),de!==r?(He=Lr(),He!==r?(Te=ba(),Te!==r?(yt=R,q=l(Te),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R}function Uu(){var R,q,de;for(R=G,q=[],de=Ku();de!==r;)q.push(de),de=Ku();return q!==r&&(yt=R,q=c(q)),R=q,R}function Ku(){var R,q,de,He,Te,Xe,Et,Rt,qn;if(R=G,q=Lr(),q===r&&(q=null),q!==r){if(de=G,t.charCodeAt(G)===35?(He=u,G++):(He=r,$e===0&&Oe(g)),He!==r){if(Te=[],Xe=G,Et=G,$e++,Rt=eo(),$e--,Rt===r?Et=void 0:(G=Et,Et=r),Et!==r?(t.length>G?(Rt=t.charAt(G),G++):(Rt=r,$e===0&&Oe(f)),Rt!==r?(Et=[Et,Rt],Xe=Et):(G=Xe,Xe=r)):(G=Xe,Xe=r),Xe!==r)for(;Xe!==r;)Te.push(Xe),Xe=G,Et=G,$e++,Rt=eo(),$e--,Rt===r?Et=void 0:(G=Et,Et=r),Et!==r?(t.length>G?(Rt=t.charAt(G),G++):(Rt=r,$e===0&&Oe(f)),Rt!==r?(Et=[Et,Rt],Xe=Et):(G=Xe,Xe=r)):(G=Xe,Xe=r);else Te=r;Te!==r?(He=[He,Te],de=He):(G=de,de=r)}else 
G=de,de=r;if(de===r&&(de=null),de!==r){if(He=[],Te=$s(),Te!==r)for(;Te!==r;)He.push(Te),Te=$s();else He=r;He!==r?(yt=R,q=h(),R=q):(G=R,R=r)}else G=R,R=r}else G=R,R=r;if(R===r&&(R=G,q=Qa(),q!==r?(de=_l(),de!==r?(He=Lr(),He===r&&(He=null),He!==r?(t.charCodeAt(G)===58?(Te=p,G++):(Te=r,$e===0&&Oe(m)),Te!==r?(Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(Et=ba(),Et!==r?(yt=R,q=y(de,Et),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,q=Qa(),q!==r?(de=Zs(),de!==r?(He=Lr(),He===r&&(He=null),He!==r?(t.charCodeAt(G)===58?(Te=p,G++):(Te=r,$e===0&&Oe(m)),Te!==r?(Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(Et=ba(),Et!==r?(yt=R,q=y(de,Et),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r))){if(R=G,q=Qa(),q!==r)if(de=Zs(),de!==r)if(He=Lr(),He!==r)if(Te=xE(),Te!==r){if(Xe=[],Et=$s(),Et!==r)for(;Et!==r;)Xe.push(Et),Et=$s();else Xe=r;Xe!==r?(yt=R,q=y(de,Te),R=q):(G=R,R=r)}else G=R,R=r;else G=R,R=r;else G=R,R=r;else G=R,R=r;if(R===r)if(R=G,q=Qa(),q!==r)if(de=Zs(),de!==r){if(He=[],Te=G,Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(t.charCodeAt(G)===44?(Et=Q,G++):(Et=r,$e===0&&Oe(S)),Et!==r?(Rt=Lr(),Rt===r&&(Rt=null),Rt!==r?(qn=Zs(),qn!==r?(yt=Te,Xe=x(de,qn),Te=Xe):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r),Te!==r)for(;Te!==r;)He.push(Te),Te=G,Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(t.charCodeAt(G)===44?(Et=Q,G++):(Et=r,$e===0&&Oe(S)),Et!==r?(Rt=Lr(),Rt===r&&(Rt=null),Rt!==r?(qn=Zs(),qn!==r?(yt=Te,Xe=x(de,qn),Te=Xe):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r);else He=r;He!==r?(Te=Lr(),Te===r&&(Te=null),Te!==r?(t.charCodeAt(G)===58?(Xe=p,G++):(Xe=r,$e===0&&Oe(m)),Xe!==r?(Et=Lr(),Et===r&&(Et=null),Et!==r?(Rt=ba(),Rt!==r?(yt=R,q=M(de,He,Rt),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)}else G=R,R=r;else G=R,R=r}return R}function ba(){var 
R,q,de,He,Te,Xe,Et;if(R=G,q=G,$e++,de=G,He=eo(),He!==r?(Te=it(),Te!==r?(t.charCodeAt(G)===45?(Xe=o,G++):(Xe=r,$e===0&&Oe(a)),Xe!==r?(Et=Lr(),Et!==r?(He=[He,Te,Xe,Et],de=He):(G=de,de=r)):(G=de,de=r)):(G=de,de=r)):(G=de,de=r),$e--,de!==r?(G=q,q=void 0):q=r,q!==r?(de=$s(),de!==r?(He=Po(),He!==r?(Te=zl(),Te!==r?(Xe=FA(),Xe!==r?(yt=R,q=Y(Te),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,q=eo(),q!==r?(de=Po(),de!==r?(He=Uu(),He!==r?(Te=FA(),Te!==r?(yt=R,q=Y(He),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r))if(R=G,q=Vl(),q!==r){if(de=[],He=$s(),He!==r)for(;He!==r;)de.push(He),He=$s();else de=r;de!==r?(yt=R,q=U(q),R=q):(G=R,R=r)}else G=R,R=r;return R}function Qa(){var R,q,de;for($e++,R=G,q=[],t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));de!==r;)q.push(de),t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));return q!==r?(yt=G,de=Z(q),de?de=void 0:de=r,de!==r?(q=[q,de],R=q):(G=R,R=r)):(G=R,R=r),$e--,R===r&&(q=r,$e===0&&Oe(J)),R}function it(){var R,q,de;for(R=G,q=[],t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));de!==r;)q.push(de),t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));return q!==r?(yt=G,de=A(q),de?de=void 0:de=r,de!==r?(q=[q,de],R=q):(G=R,R=r)):(G=R,R=r),R}function Po(){var R;return yt=G,R=ne(),R?R=void 0:R=r,R}function FA(){var R;return yt=G,R=le(),R?R=void 0:R=r,R}function _l(){var R;return R=Xl(),R===r&&(R=Nh()),R}function Zs(){var R,q,de;if(R=Xl(),R===r){if(R=G,q=[],de=Hu(),de!==r)for(;de!==r;)q.push(de),de=Hu();else q=r;q!==r&&(yt=R,q=Ae()),R=q}return R}function Vl(){var R;return R=Lh(),R===r&&(R=PE(),R===r&&(R=Xl(),R===r&&(R=Nh()))),R}function xE(){var R;return R=Lh(),R===r&&(R=Xl(),R===r&&(R=Hu())),R}function Nh(){var 
R,q,de,He,Te,Xe;if($e++,R=G,L.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Ee)),q!==r){for(de=[],He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(we.test(t.charAt(G))?(Xe=t.charAt(G),G++):(Xe=r,$e===0&&Oe(qe)),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);He!==r;)de.push(He),He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(we.test(t.charAt(G))?(Xe=t.charAt(G),G++):(Xe=r,$e===0&&Oe(qe)),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);de!==r?(yt=R,q=re(),R=q):(G=R,R=r)}else G=R,R=r;return $e--,R===r&&(q=r,$e===0&&Oe(T)),R}function Hu(){var R,q,de,He,Te;if(R=G,t.substr(G,2)===se?(q=se,G+=2):(q=r,$e===0&&Oe(Qe)),q===r&&(q=null),q!==r)if(he.test(t.charAt(G))?(de=t.charAt(G),G++):(de=r,$e===0&&Oe(Fe)),de!==r){for(He=[],Ue.test(t.charAt(G))?(Te=t.charAt(G),G++):(Te=r,$e===0&&Oe(xe));Te!==r;)He.push(Te),Ue.test(t.charAt(G))?(Te=t.charAt(G),G++):(Te=r,$e===0&&Oe(xe));He!==r?(yt=R,q=re(),R=q):(G=R,R=r)}else G=R,R=r;else G=R,R=r;return R}function Lh(){var R,q;return R=G,t.substr(G,4)===ve?(q=ve,G+=4):(q=r,$e===0&&Oe(pe)),q!==r&&(yt=R,q=X()),R=q,R}function PE(){var R,q;return R=G,t.substr(G,4)===be?(q=be,G+=4):(q=r,$e===0&&Oe(ce)),q!==r&&(yt=R,q=fe()),R=q,R===r&&(R=G,t.substr(G,5)===gt?(q=gt,G+=5):(q=r,$e===0&&Oe(Ht)),q!==r&&(yt=R,q=Mt()),R=q),R}function Xl(){var R,q,de,He;return $e++,R=G,t.charCodeAt(G)===34?(q=jt,G++):(q=r,$e===0&&Oe(Qr)),q!==r?(t.charCodeAt(G)===34?(de=jt,G++):(de=r,$e===0&&Oe(Qr)),de!==r?(yt=R,q=Ti(),R=q):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,t.charCodeAt(G)===34?(q=jt,G++):(q=r,$e===0&&Oe(Qr)),q!==r?(de=DE(),de!==r?(t.charCodeAt(G)===34?(He=jt,G++):(He=r,$e===0&&Oe(Qr)),He!==r?(yt=R,q=_s(de),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)),$e--,R===r&&(q=r,$e===0&&Oe(mi)),R}function DE(){var R,q,de;if(R=G,q=[],de=ju(),de!==r)for(;de!==r;)q.push(de),de=ju();else q=r;return q!==r&&(yt=R,q=Un(q)),R=q,R}function ju(){var R,q,de,He,Te,Xe;return 
Kn.test(t.charAt(G))?(R=t.charAt(G),G++):(R=r,$e===0&&Oe(vr)),R===r&&(R=G,t.substr(G,2)===Hn?(q=Hn,G+=2):(q=r,$e===0&&Oe(us)),q!==r&&(yt=R,q=Ia()),R=q,R===r&&(R=G,t.substr(G,2)===SA?(q=SA,G+=2):(q=r,$e===0&&Oe(Du)),q!==r&&(yt=R,q=gs()),R=q,R===r&&(R=G,t.substr(G,2)===kA?(q=kA,G+=2):(q=r,$e===0&&Oe(ya)),q!==r&&(yt=R,q=Ru()),R=q,R===r&&(R=G,t.substr(G,2)===xA?(q=xA,G+=2):(q=r,$e===0&&Oe(PA)),q!==r&&(yt=R,q=Sr()),R=q,R===r&&(R=G,t.substr(G,2)===jl?(q=jl,G+=2):(q=r,$e===0&&Oe(Fu)),q!==r&&(yt=R,q=So()),R=q,R===r&&(R=G,t.substr(G,2)===Nu?(q=Nu,G+=2):(q=r,$e===0&&Oe(Qh)),q!==r&&(yt=R,q=vh()),R=q,R===r&&(R=G,t.substr(G,2)===oe?(q=oe,G+=2):(q=r,$e===0&&Oe(Oi)),q!==r&&(yt=R,q=ko()),R=q,R===r&&(R=G,t.substr(G,2)===jn?(q=jn,G+=2):(q=r,$e===0&&Oe(Lu)),q!==r&&(yt=R,q=vt()),R=q,R===r&&(R=G,t.substr(G,2)===Gl?(q=Gl,G+=2):(q=r,$e===0&&Oe(Gn)),q!==r?(de=NA(),de!==r?(He=NA(),He!==r?(Te=NA(),Te!==r?(Xe=NA(),Xe!==r?(yt=R,q=fs(de,He,Te,Xe),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)))))))))),R}function NA(){var R;return hs.test(t.charAt(G))?(R=t.charAt(G),G++):(R=r,$e===0&&Oe(pt)),R}function Lr(){var R,q;if($e++,R=[],lt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(mn)),q!==r)for(;q!==r;)R.push(q),lt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(mn));else R=r;return $e--,R===r&&(q=r,$e===0&&Oe(xo)),R}function RE(){var R,q;if($e++,R=[],Tt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Tu)),q!==r)for(;q!==r;)R.push(q),Tt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Tu));else R=r;return $e--,R===r&&(q=r,$e===0&&Oe(v)),R}function $s(){var R,q,de,He,Te,Xe;if(R=G,q=eo(),q!==r){for(de=[],He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(Xe=eo(),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);He!==r;)de.push(He),He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(Xe=eo(),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);de!==r?(q=[q,de],R=q):(G=R,R=r)}else G=R,R=r;return R}function eo(){var R;return 
t.substr(G,2)===Yl?(R=Yl,G+=2):(R=r,$e===0&&Oe(Sh)),R===r&&(t.charCodeAt(G)===10?(R=kh,G++):(R=r,$e===0&&Oe(xh)),R===r&&(t.charCodeAt(G)===13?(R=Ph,G++):(R=r,$e===0&&Oe(Dh)))),R}let Gu=2,LA=0;if(wa=n(),wa!==r&&G===t.length)return wa;throw wa!==r&&G{"use strict";var Yde=t=>{let e=!1,r=!1,i=!1;for(let n=0;n{if(!(typeof t=="string"||Array.isArray(t)))throw new TypeError("Expected the input to be `string | string[]`");e=Object.assign({pascalCase:!1},e);let r=n=>e.pascalCase?n.charAt(0).toUpperCase()+n.slice(1):n;return Array.isArray(t)?t=t.map(n=>n.trim()).filter(n=>n.length).join("-"):t=t.trim(),t.length===0?"":t.length===1?e.pascalCase?t.toUpperCase():t.toLowerCase():(t!==t.toLowerCase()&&(t=Yde(t)),t=t.replace(/^[_.\- ]+/,"").toLowerCase().replace(/[_.\- ]+(\w|$)/g,(n,s)=>s.toUpperCase()).replace(/\d+(\w|$)/g,n=>n.toUpperCase()),r(t))};tv.exports=pK;tv.exports.default=pK});var mK=w((u$e,CK)=>{CK.exports=[{name:"AppVeyor",constant:"APPVEYOR",env:"APPVEYOR",pr:"APPVEYOR_PULL_REQUEST_NUMBER"},{name:"Azure Pipelines",constant:"AZURE_PIPELINES",env:"SYSTEM_TEAMFOUNDATIONCOLLECTIONURI",pr:"SYSTEM_PULLREQUEST_PULLREQUESTID"},{name:"Appcircle",constant:"APPCIRCLE",env:"AC_APPCIRCLE"},{name:"Bamboo",constant:"BAMBOO",env:"bamboo_planKey"},{name:"Bitbucket Pipelines",constant:"BITBUCKET",env:"BITBUCKET_COMMIT",pr:"BITBUCKET_PR_ID"},{name:"Bitrise",constant:"BITRISE",env:"BITRISE_IO",pr:"BITRISE_PULL_REQUEST"},{name:"Buddy",constant:"BUDDY",env:"BUDDY_WORKSPACE_ID",pr:"BUDDY_EXECUTION_PULL_REQUEST_ID"},{name:"Buildkite",constant:"BUILDKITE",env:"BUILDKITE",pr:{env:"BUILDKITE_PULL_REQUEST",ne:"false"}},{name:"CircleCI",constant:"CIRCLE",env:"CIRCLECI",pr:"CIRCLE_PULL_REQUEST"},{name:"Cirrus CI",constant:"CIRRUS",env:"CIRRUS_CI",pr:"CIRRUS_PR"},{name:"AWS 
CodeBuild",constant:"CODEBUILD",env:"CODEBUILD_BUILD_ARN"},{name:"Codefresh",constant:"CODEFRESH",env:"CF_BUILD_ID",pr:{any:["CF_PULL_REQUEST_NUMBER","CF_PULL_REQUEST_ID"]}},{name:"Codeship",constant:"CODESHIP",env:{CI_NAME:"codeship"}},{name:"Drone",constant:"DRONE",env:"DRONE",pr:{DRONE_BUILD_EVENT:"pull_request"}},{name:"dsari",constant:"DSARI",env:"DSARI"},{name:"GitHub Actions",constant:"GITHUB_ACTIONS",env:"GITHUB_ACTIONS",pr:{GITHUB_EVENT_NAME:"pull_request"}},{name:"GitLab CI",constant:"GITLAB",env:"GITLAB_CI",pr:"CI_MERGE_REQUEST_ID"},{name:"GoCD",constant:"GOCD",env:"GO_PIPELINE_LABEL"},{name:"LayerCI",constant:"LAYERCI",env:"LAYERCI",pr:"LAYERCI_PULL_REQUEST"},{name:"Hudson",constant:"HUDSON",env:"HUDSON_URL"},{name:"Jenkins",constant:"JENKINS",env:["JENKINS_URL","BUILD_ID"],pr:{any:["ghprbPullId","CHANGE_ID"]}},{name:"Magnum CI",constant:"MAGNUM",env:"MAGNUM"},{name:"Netlify CI",constant:"NETLIFY",env:"NETLIFY",pr:{env:"PULL_REQUEST",ne:"false"}},{name:"Nevercode",constant:"NEVERCODE",env:"NEVERCODE",pr:{env:"NEVERCODE_PULL_REQUEST",ne:"false"}},{name:"Render",constant:"RENDER",env:"RENDER",pr:{IS_PULL_REQUEST:"true"}},{name:"Sail CI",constant:"SAIL",env:"SAILCI",pr:"SAIL_PULL_REQUEST_NUMBER"},{name:"Semaphore",constant:"SEMAPHORE",env:"SEMAPHORE",pr:"PULL_REQUEST_NUMBER"},{name:"Screwdriver",constant:"SCREWDRIVER",env:"SCREWDRIVER",pr:{env:"SD_PULL_REQUEST",ne:"false"}},{name:"Shippable",constant:"SHIPPABLE",env:"SHIPPABLE",pr:{IS_PULL_REQUEST:"true"}},{name:"Solano CI",constant:"SOLANO",env:"TDDIUM",pr:"TDDIUM_PR_ID"},{name:"Strider CD",constant:"STRIDER",env:"STRIDER"},{name:"TaskCluster",constant:"TASKCLUSTER",env:["TASK_ID","RUN_ID"]},{name:"TeamCity",constant:"TEAMCITY",env:"TEAMCITY_VERSION"},{name:"Travis CI",constant:"TRAVIS",env:"TRAVIS",pr:{env:"TRAVIS_PULL_REQUEST",ne:"false"}},{name:"Vercel",constant:"VERCEL",env:"NOW_BUILDER"},{name:"Visual Studio App Center",constant:"APPCENTER",env:"APPCENTER_BUILD_ID"}]});var hc=w(_n=>{"use strict";var 
EK=mK(),Fo=process.env;Object.defineProperty(_n,"_vendors",{value:EK.map(function(t){return t.constant})});_n.name=null;_n.isPR=null;EK.forEach(function(t){let r=(Array.isArray(t.env)?t.env:[t.env]).every(function(i){return IK(i)});if(_n[t.constant]=r,r)switch(_n.name=t.name,typeof t.pr){case"string":_n.isPR=!!Fo[t.pr];break;case"object":"env"in t.pr?_n.isPR=t.pr.env in Fo&&Fo[t.pr.env]!==t.pr.ne:"any"in t.pr?_n.isPR=t.pr.any.some(function(i){return!!Fo[i]}):_n.isPR=IK(t.pr);break;default:_n.isPR=null}});_n.isCI=!!(Fo.CI||Fo.CONTINUOUS_INTEGRATION||Fo.BUILD_NUMBER||Fo.RUN_ID||_n.name);function IK(t){return typeof t=="string"?!!Fo[t]:Object.keys(t).every(function(e){return Fo[e]===t[e]})}});var sg={};ft(sg,{KeyRelationship:()=>Cc,applyCascade:()=>fp,base64RegExp:()=>QK,colorStringAlphaRegExp:()=>bK,colorStringRegExp:()=>BK,computeKey:()=>GA,getPrintable:()=>ei,hasExactLength:()=>PK,hasForbiddenKeys:()=>wCe,hasKeyRelationship:()=>lv,hasMaxLength:()=>sCe,hasMinLength:()=>nCe,hasMutuallyExclusiveKeys:()=>BCe,hasRequiredKeys:()=>yCe,hasUniqueItems:()=>oCe,isArray:()=>Vde,isAtLeast:()=>lCe,isAtMost:()=>cCe,isBase64:()=>ECe,isBoolean:()=>Wde,isDate:()=>_de,isDict:()=>Zde,isEnum:()=>nn,isHexColor:()=>mCe,isISO8601:()=>CCe,isInExclusiveRange:()=>gCe,isInInclusiveRange:()=>uCe,isInstanceOf:()=>eCe,isInteger:()=>fCe,isJSON:()=>ICe,isLiteral:()=>qde,isLowerCase:()=>hCe,isNegative:()=>aCe,isNullable:()=>iCe,isNumber:()=>zde,isObject:()=>$de,isOneOf:()=>tCe,isOptional:()=>rCe,isPositive:()=>ACe,isString:()=>gp,isTuple:()=>Xde,isUUID4:()=>dCe,isUnknown:()=>xK,isUpperCase:()=>pCe,iso8601RegExp:()=>Av,makeCoercionFn:()=>dc,makeSetter:()=>kK,makeTrait:()=>SK,makeValidator:()=>St,matchesRegExp:()=>hp,plural:()=>CI,pushError:()=>mt,simpleKeyRegExp:()=>wK,uuid4RegExp:()=>vK});function St({test:t}){return SK(t)()}function ei(t){return t===null?"null":t===void 0?"undefined":t===""?"an empty string":JSON.stringify(t)}function GA(t,e){var r,i,n;return typeof e=="number"?`${(r=t==null?void 
0:t.p)!==null&&r!==void 0?r:"."}[${e}]`:wK.test(e)?`${(i=t==null?void 0:t.p)!==null&&i!==void 0?i:""}.${e}`:`${(n=t==null?void 0:t.p)!==null&&n!==void 0?n:"."}[${JSON.stringify(e)}]`}function dc(t,e){return r=>{let i=t[e];return t[e]=r,dc(t,e).bind(null,i)}}function kK(t,e){return r=>{t[e]=r}}function CI(t,e,r){return t===1?e:r}function mt({errors:t,p:e}={},r){return t==null||t.push(`${e!=null?e:"."}: ${r}`),!1}function qde(t){return St({test:(e,r)=>e!==t?mt(r,`Expected a literal (got ${ei(t)})`):!0})}function nn(t){let e=Array.isArray(t)?t:Object.values(t),r=new Set(e);return St({test:(i,n)=>r.has(i)?!0:mt(n,`Expected a valid enumeration value (got ${ei(i)})`)})}var wK,BK,bK,QK,vK,Av,SK,xK,gp,Jde,Wde,zde,_de,Vde,Xde,Zde,$de,eCe,tCe,fp,rCe,iCe,nCe,sCe,PK,oCe,aCe,ACe,lCe,cCe,uCe,gCe,fCe,hp,hCe,pCe,dCe,CCe,mCe,ECe,ICe,yCe,wCe,BCe,Cc,bCe,lv,Es=hfe(()=>{wK=/^[a-zA-Z_][a-zA-Z0-9_]*$/,BK=/^#[0-9a-f]{6}$/i,bK=/^#[0-9a-f]{6}([0-9a-f]{2})?$/i,QK=/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/,vK=/^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}$/i,Av=/^(?:[1-9]\d{3}(-?)(?:(?:0[1-9]|1[0-2])\1(?:0[1-9]|1\d|2[0-8])|(?:0[13-9]|1[0-2])\1(?:29|30)|(?:0[13578]|1[02])(?:\1)31|00[1-9]|0[1-9]\d|[12]\d{2}|3(?:[0-5]\d|6[0-5]))|(?:[1-9]\d(?:0[48]|[2468][048]|[13579][26])|(?:[2468][048]|[13579][26])00)(?:(-?)02(?:\2)29|-?366))T(?:[01]\d|2[0-3])(:?)[0-5]\d(?:\3[0-5]\d)?(?:Z|[+-][01]\d(?:\3[0-5]\d)?)$/,SK=t=>()=>t;xK=()=>St({test:(t,e)=>!0});gp=()=>St({test:(t,e)=>typeof t!="string"?mt(e,`Expected a string (got ${ei(t)})`):!0});Jde=new Map([["true",!0],["True",!0],["1",!0],[1,!0],["false",!1],["False",!1],["0",!1],[0,!1]]),Wde=()=>St({test:(t,e)=>{var r;if(typeof t!="boolean"){if(typeof(e==null?void 0:e.coercions)!="undefined"){if(typeof(e==null?void 0:e.coercion)=="undefined")return mt(e,"Unbound coercion result");let i=Jde.get(t);if(typeof i!="undefined")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:".",e.coercion.bind(null,i)]),!0}return 
mt(e,`Expected a boolean (got ${ei(t)})`)}return!0}}),zde=()=>St({test:(t,e)=>{var r;if(typeof t!="number"){if(typeof(e==null?void 0:e.coercions)!="undefined"){if(typeof(e==null?void 0:e.coercion)=="undefined")return mt(e,"Unbound coercion result");let i;if(typeof t=="string"){let n;try{n=JSON.parse(t)}catch(s){}if(typeof n=="number")if(JSON.stringify(n)===t)i=n;else return mt(e,`Received a number that can't be safely represented by the runtime (${t})`)}if(typeof i!="undefined")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:".",e.coercion.bind(null,i)]),!0}return mt(e,`Expected a number (got ${ei(t)})`)}return!0}}),_de=()=>St({test:(t,e)=>{var r;if(!(t instanceof Date)){if(typeof(e==null?void 0:e.coercions)!="undefined"){if(typeof(e==null?void 0:e.coercion)=="undefined")return mt(e,"Unbound coercion result");let i;if(typeof t=="string"&&Av.test(t))i=new Date(t);else{let n;if(typeof t=="string"){let s;try{s=JSON.parse(t)}catch(o){}typeof s=="number"&&(n=s)}else typeof t=="number"&&(n=t);if(typeof n!="undefined")if(Number.isSafeInteger(n)||!Number.isSafeInteger(n*1e3))i=new Date(n*1e3);else return mt(e,`Received a timestamp that can't be safely represented by the runtime (${t})`)}if(typeof i!="undefined")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:".",e.coercion.bind(null,i)]),!0}return mt(e,`Expected a date (got ${ei(t)})`)}return!0}}),Vde=(t,{delimiter:e}={})=>St({test:(r,i)=>{var n;if(typeof r=="string"&&typeof e!="undefined"&&typeof(i==null?void 0:i.coercions)!="undefined"){if(typeof(i==null?void 0:i.coercion)=="undefined")return mt(i,"Unbound coercion result");r=r.split(e),i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,r)])}if(!Array.isArray(r))return mt(i,`Expected an array (got ${ei(r)})`);let s=!0;for(let o=0,a=r.length;o{let r=PK(t.length);return St({test:(i,n)=>{var s;if(typeof i=="string"&&typeof e!="undefined"&&typeof(n==null?void 0:n.coercions)!="undefined"){if(typeof(n==null?void 
0:n.coercion)=="undefined")return mt(n,"Unbound coercion result");i=i.split(e),n.coercions.push([(s=n.p)!==null&&s!==void 0?s:".",n.coercion.bind(null,i)])}if(!Array.isArray(i))return mt(n,`Expected a tuple (got ${ei(i)})`);let o=r(i,Object.assign({},n));for(let a=0,l=i.length;aSt({test:(r,i)=>{if(typeof r!="object"||r===null)return mt(i,`Expected an object (got ${ei(r)})`);let n=Object.keys(r),s=!0;for(let o=0,a=n.length;o{let r=Object.keys(t);return St({test:(i,n)=>{if(typeof i!="object"||i===null)return mt(n,`Expected an object (got ${ei(i)})`);let s=new Set([...r,...Object.keys(i)]),o={},a=!0;for(let l of s){if(l==="constructor"||l==="__proto__")a=mt(Object.assign(Object.assign({},n),{p:GA(n,l)}),"Unsafe property name");else{let c=Object.prototype.hasOwnProperty.call(t,l)?t[l]:void 0,u=Object.prototype.hasOwnProperty.call(i,l)?i[l]:void 0;typeof c!="undefined"?a=c(u,Object.assign(Object.assign({},n),{p:GA(n,l),coercion:dc(i,l)}))&&a:e===null?a=mt(Object.assign(Object.assign({},n),{p:GA(n,l)}),`Extraneous property (got ${ei(u)})`):Object.defineProperty(o,l,{enumerable:!0,get:()=>u,set:kK(i,l)})}if(!a&&(n==null?void 0:n.errors)==null)break}return e!==null&&(a||(n==null?void 0:n.errors)!=null)&&(a=e(o,n)&&a),a}})},eCe=t=>St({test:(e,r)=>e instanceof t?!0:mt(r,`Expected an instance of ${t.name} (got ${ei(e)})`)}),tCe=(t,{exclusive:e=!1}={})=>St({test:(r,i)=>{var n,s,o;let a=[],l=typeof(i==null?void 0:i.errors)!="undefined"?[]:void 0;for(let c=0,u=t.length;c1?mt(i,`Expected to match exactly a single predicate (matched ${a.join(", ")})`):(o=i==null?void 0:i.errors)===null||o===void 0||o.push(...l),!1}}),fp=(t,e)=>St({test:(r,i)=>{var n,s;let o={value:r},a=typeof(i==null?void 0:i.coercions)!="undefined"?dc(o,"value"):void 0,l=typeof(i==null?void 0:i.coercions)!="undefined"?[]:void 0;if(!t(r,Object.assign(Object.assign({},i),{coercion:a,coercions:l})))return!1;let c=[];if(typeof l!="undefined")for(let[,u]of l)c.push(u());try{if(typeof(i==null?void 
0:i.coercions)!="undefined"){if(o.value!==r){if(typeof(i==null?void 0:i.coercion)=="undefined")return mt(i,"Unbound coercion result");i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,o.value)])}(s=i==null?void 0:i.coercions)===null||s===void 0||s.push(...l)}return e.every(u=>u(o.value,i))}finally{for(let u of c)u()}}}),rCe=t=>St({test:(e,r)=>typeof e=="undefined"?!0:t(e,r)}),iCe=t=>St({test:(e,r)=>e===null?!0:t(e,r)}),nCe=t=>St({test:(e,r)=>e.length>=t?!0:mt(r,`Expected to have a length of at least ${t} elements (got ${e.length})`)}),sCe=t=>St({test:(e,r)=>e.length<=t?!0:mt(r,`Expected to have a length of at most ${t} elements (got ${e.length})`)}),PK=t=>St({test:(e,r)=>e.length!==t?mt(r,`Expected to have a length of exactly ${t} elements (got ${e.length})`):!0}),oCe=({map:t}={})=>St({test:(e,r)=>{let i=new Set,n=new Set;for(let s=0,o=e.length;sSt({test:(t,e)=>t<=0?!0:mt(e,`Expected to be negative (got ${t})`)}),ACe=()=>St({test:(t,e)=>t>=0?!0:mt(e,`Expected to be positive (got ${t})`)}),lCe=t=>St({test:(e,r)=>e>=t?!0:mt(r,`Expected to be at least ${t} (got ${e})`)}),cCe=t=>St({test:(e,r)=>e<=t?!0:mt(r,`Expected to be at most ${t} (got ${e})`)}),uCe=(t,e)=>St({test:(r,i)=>r>=t&&r<=e?!0:mt(i,`Expected to be in the [${t}; ${e}] range (got ${r})`)}),gCe=(t,e)=>St({test:(r,i)=>r>=t&&rSt({test:(e,r)=>e!==Math.round(e)?mt(r,`Expected to be an integer (got ${e})`):Number.isSafeInteger(e)?!0:mt(r,`Expected to be a safe integer (got ${e})`)}),hp=t=>St({test:(e,r)=>t.test(e)?!0:mt(r,`Expected to match the pattern ${t.toString()} (got ${ei(e)})`)}),hCe=()=>St({test:(t,e)=>t!==t.toLowerCase()?mt(e,`Expected to be all-lowercase (got ${t})`):!0}),pCe=()=>St({test:(t,e)=>t!==t.toUpperCase()?mt(e,`Expected to be all-uppercase (got ${t})`):!0}),dCe=()=>St({test:(t,e)=>vK.test(t)?!0:mt(e,`Expected to be a valid UUID v4 (got ${ei(t)})`)}),CCe=()=>St({test:(t,e)=>Av.test(t)?!1:mt(e,`Expected to be a valid ISO 8601 date string (got 
${ei(t)})`)}),mCe=({alpha:t=!1})=>St({test:(e,r)=>(t?BK.test(e):bK.test(e))?!0:mt(r,`Expected to be a valid hexadecimal color string (got ${ei(e)})`)}),ECe=()=>St({test:(t,e)=>QK.test(t)?!0:mt(e,`Expected to be a valid base 64 string (got ${ei(t)})`)}),ICe=(t=xK())=>St({test:(e,r)=>{let i;try{i=JSON.parse(e)}catch(n){return mt(r,`Expected to be a valid JSON string (got ${ei(e)})`)}return t(i,r)}}),yCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)||s.push(o);return s.length>0?mt(i,`Missing required ${CI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},wCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>0?mt(i,`Forbidden ${CI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},BCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>1?mt(i,`Mutually exclusive properties ${s.map(o=>`"${o}"`).join(", ")}`):!0}})};(function(t){t.Forbids="Forbids",t.Requires="Requires"})(Cc||(Cc={}));bCe={[Cc.Forbids]:{expect:!1,message:"forbids using"},[Cc.Requires]:{expect:!0,message:"requires using"}},lv=(t,e,r,{ignore:i=[]}={})=>{let n=new Set(i),s=new Set(r),o=bCe[e];return St({test:(a,l)=>{let c=new Set(Object.keys(a));if(!c.has(t)||n.has(a[t]))return!0;let u=[];for(let g of s)(c.has(g)&&!n.has(a[g]))!==o.expect&&u.push(g);return u.length>=1?mt(l,`Property "${t}" ${o.message} ${CI(u.length,"property","properties")} ${u.map(g=>`"${g}"`).join(", ")}`):!0}})}});var _K=w((fet,zK)=>{"use strict";zK.exports=(t,...e)=>new Promise(r=>{r(t(...e))})});var ag=w((het,dv)=>{"use strict";var HCe=_K(),VK=t=>{if(t<1)throw new TypeError("Expected `concurrency` to be a number from 1 and up");let e=[],r=0,i=()=>{r--,e.length>0&&e.shift()()},n=(a,l,...c)=>{r++;let u=HCe(a,...c);l(u),u.then(i,i)},s=(a,l,...c)=>{rnew 
Promise(c=>s(a,c,...l));return Object.defineProperties(o,{activeCount:{get:()=>r},pendingCount:{get:()=>e.length}}),o};dv.exports=VK;dv.exports.default=VK});var mp=w((det,XK)=>{var jCe="2.0.0",GCe=256,YCe=Number.MAX_SAFE_INTEGER||9007199254740991,qCe=16;XK.exports={SEMVER_SPEC_VERSION:jCe,MAX_LENGTH:GCe,MAX_SAFE_INTEGER:YCe,MAX_SAFE_COMPONENT_LENGTH:qCe}});var Ep=w((Cet,ZK)=>{var JCe=typeof process=="object"&&process.env&&process.env.NODE_DEBUG&&/\bsemver\b/i.test(process.env.NODE_DEBUG)?(...t)=>console.error("SEMVER",...t):()=>{};ZK.exports=JCe});var mc=w((qA,$K)=>{var{MAX_SAFE_COMPONENT_LENGTH:Cv}=mp(),WCe=Ep();qA=$K.exports={};var zCe=qA.re=[],tt=qA.src=[],rt=qA.t={},_Ce=0,kt=(t,e,r)=>{let i=_Ce++;WCe(i,e),rt[t]=i,tt[i]=e,zCe[i]=new RegExp(e,r?"g":void 0)};kt("NUMERICIDENTIFIER","0|[1-9]\\d*");kt("NUMERICIDENTIFIERLOOSE","[0-9]+");kt("NONNUMERICIDENTIFIER","\\d*[a-zA-Z-][a-zA-Z0-9-]*");kt("MAINVERSION",`(${tt[rt.NUMERICIDENTIFIER]})\\.(${tt[rt.NUMERICIDENTIFIER]})\\.(${tt[rt.NUMERICIDENTIFIER]})`);kt("MAINVERSIONLOOSE",`(${tt[rt.NUMERICIDENTIFIERLOOSE]})\\.(${tt[rt.NUMERICIDENTIFIERLOOSE]})\\.(${tt[rt.NUMERICIDENTIFIERLOOSE]})`);kt("PRERELEASEIDENTIFIER",`(?:${tt[rt.NUMERICIDENTIFIER]}|${tt[rt.NONNUMERICIDENTIFIER]})`);kt("PRERELEASEIDENTIFIERLOOSE",`(?:${tt[rt.NUMERICIDENTIFIERLOOSE]}|${tt[rt.NONNUMERICIDENTIFIER]})`);kt("PRERELEASE",`(?:-(${tt[rt.PRERELEASEIDENTIFIER]}(?:\\.${tt[rt.PRERELEASEIDENTIFIER]})*))`);kt("PRERELEASELOOSE",`(?:-?(${tt[rt.PRERELEASEIDENTIFIERLOOSE]}(?:\\.${tt[rt.PRERELEASEIDENTIFIERLOOSE]})*))`);kt("BUILDIDENTIFIER","[0-9A-Za-z-]+");kt("BUILD",`(?:\\+(${tt[rt.BUILDIDENTIFIER]}(?:\\.${tt[rt.BUILDIDENTIFIER]})*))`);kt("FULLPLAIN",`v?${tt[rt.MAINVERSION]}${tt[rt.PRERELEASE]}?${tt[rt.BUILD]}?`);kt("FULL",`^${tt[rt.FULLPLAIN]}$`);kt("LOOSEPLAIN",`[v=\\s]*${tt[rt.MAINVERSIONLOOSE]}${tt[rt.PRERELEASELOOSE]}?${tt[rt.BUILD]}?`);kt("LOOSE",`^${tt[rt.LOOSEPLAIN]}$`);kt("GTLT","((?:<|>)?=?)");kt("XRANGEIDENTIFIERLOOSE",`${tt[rt.NUMERICIDENTIFIERLOOS
E]}|x|X|\\*`);kt("XRANGEIDENTIFIER",`${tt[rt.NUMERICIDENTIFIER]}|x|X|\\*`);kt("XRANGEPLAIN",`[v=\\s]*(${tt[rt.XRANGEIDENTIFIER]})(?:\\.(${tt[rt.XRANGEIDENTIFIER]})(?:\\.(${tt[rt.XRANGEIDENTIFIER]})(?:${tt[rt.PRERELEASE]})?${tt[rt.BUILD]}?)?)?`);kt("XRANGEPLAINLOOSE",`[v=\\s]*(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:${tt[rt.PRERELEASELOOSE]})?${tt[rt.BUILD]}?)?)?`);kt("XRANGE",`^${tt[rt.GTLT]}\\s*${tt[rt.XRANGEPLAIN]}$`);kt("XRANGELOOSE",`^${tt[rt.GTLT]}\\s*${tt[rt.XRANGEPLAINLOOSE]}$`);kt("COERCE",`(^|[^\\d])(\\d{1,${Cv}})(?:\\.(\\d{1,${Cv}}))?(?:\\.(\\d{1,${Cv}}))?(?:$|[^\\d])`);kt("COERCERTL",tt[rt.COERCE],!0);kt("LONETILDE","(?:~>?)");kt("TILDETRIM",`(\\s*)${tt[rt.LONETILDE]}\\s+`,!0);qA.tildeTrimReplace="$1~";kt("TILDE",`^${tt[rt.LONETILDE]}${tt[rt.XRANGEPLAIN]}$`);kt("TILDELOOSE",`^${tt[rt.LONETILDE]}${tt[rt.XRANGEPLAINLOOSE]}$`);kt("LONECARET","(?:\\^)");kt("CARETTRIM",`(\\s*)${tt[rt.LONECARET]}\\s+`,!0);qA.caretTrimReplace="$1^";kt("CARET",`^${tt[rt.LONECARET]}${tt[rt.XRANGEPLAIN]}$`);kt("CARETLOOSE",`^${tt[rt.LONECARET]}${tt[rt.XRANGEPLAINLOOSE]}$`);kt("COMPARATORLOOSE",`^${tt[rt.GTLT]}\\s*(${tt[rt.LOOSEPLAIN]})$|^$`);kt("COMPARATOR",`^${tt[rt.GTLT]}\\s*(${tt[rt.FULLPLAIN]})$|^$`);kt("COMPARATORTRIM",`(\\s*)${tt[rt.GTLT]}\\s*(${tt[rt.LOOSEPLAIN]}|${tt[rt.XRANGEPLAIN]})`,!0);qA.comparatorTrimReplace="$1$2$3";kt("HYPHENRANGE",`^\\s*(${tt[rt.XRANGEPLAIN]})\\s+-\\s+(${tt[rt.XRANGEPLAIN]})\\s*$`);kt("HYPHENRANGELOOSE",`^\\s*(${tt[rt.XRANGEPLAINLOOSE]})\\s+-\\s+(${tt[rt.XRANGEPLAINLOOSE]})\\s*$`);kt("STAR","(<|>)?=?\\s*\\*");kt("GTE0","^\\s*>=\\s*0.0.0\\s*$");kt("GTE0PRE","^\\s*>=\\s*0.0.0-0\\s*$")});var Ip=w((met,e2)=>{var VCe=["includePrerelease","loose","rtl"],XCe=t=>t?typeof t!="object"?{loose:!0}:VCe.filter(e=>t[e]).reduce((e,r)=>(e[r]=!0,e),{}):{};e2.exports=XCe});var bI=w((Eet,t2)=>{var r2=/^[0-9]+$/,i2=(t,e)=>{let r=r2.test(t),i=r2.test(e);return 
r&&i&&(t=+t,e=+e),t===e?0:r&&!i?-1:i&&!r?1:ti2(e,t);t2.exports={compareIdentifiers:i2,rcompareIdentifiers:ZCe}});var Hi=w((Iet,n2)=>{var QI=Ep(),{MAX_LENGTH:s2,MAX_SAFE_INTEGER:vI}=mp(),{re:o2,t:a2}=mc(),$Ce=Ip(),{compareIdentifiers:yp}=bI(),ys=class{constructor(e,r){if(r=$Ce(r),e instanceof ys){if(e.loose===!!r.loose&&e.includePrerelease===!!r.includePrerelease)return e;e=e.version}else if(typeof e!="string")throw new TypeError(`Invalid Version: ${e}`);if(e.length>s2)throw new TypeError(`version is longer than ${s2} characters`);QI("SemVer",e,r),this.options=r,this.loose=!!r.loose,this.includePrerelease=!!r.includePrerelease;let i=e.trim().match(r.loose?o2[a2.LOOSE]:o2[a2.FULL]);if(!i)throw new TypeError(`Invalid Version: ${e}`);if(this.raw=e,this.major=+i[1],this.minor=+i[2],this.patch=+i[3],this.major>vI||this.major<0)throw new TypeError("Invalid major version");if(this.minor>vI||this.minor<0)throw new TypeError("Invalid minor version");if(this.patch>vI||this.patch<0)throw new TypeError("Invalid patch version");i[4]?this.prerelease=i[4].split(".").map(n=>{if(/^[0-9]+$/.test(n)){let s=+n;if(s>=0&&s=0;)typeof this.prerelease[i]=="number"&&(this.prerelease[i]++,i=-2);i===-1&&this.prerelease.push(0)}r&&(this.prerelease[0]===r?isNaN(this.prerelease[1])&&(this.prerelease=[r,0]):this.prerelease=[r,0]);break;default:throw new Error(`invalid increment argument: ${e}`)}return this.format(),this.raw=this.version,this}};n2.exports=ys});var Ec=w((yet,A2)=>{var{MAX_LENGTH:eme}=mp(),{re:l2,t:c2}=mc(),u2=Hi(),tme=Ip(),rme=(t,e)=>{if(e=tme(e),t instanceof u2)return t;if(typeof t!="string"||t.length>eme||!(e.loose?l2[c2.LOOSE]:l2[c2.FULL]).test(t))return null;try{return new u2(t,e)}catch(i){return null}};A2.exports=rme});var f2=w((wet,g2)=>{var ime=Ec(),nme=(t,e)=>{let r=ime(t,e);return r?r.version:null};g2.exports=nme});var p2=w((Bet,h2)=>{var sme=Ec(),ome=(t,e)=>{let r=sme(t.trim().replace(/^[=v]+/,""),e);return r?r.version:null};h2.exports=ome});var C2=w((bet,d2)=>{var 
ame=Hi(),Ame=(t,e,r,i)=>{typeof r=="string"&&(i=r,r=void 0);try{return new ame(t,r).inc(e,i).version}catch(n){return null}};d2.exports=Ame});var ws=w((Qet,m2)=>{var E2=Hi(),lme=(t,e,r)=>new E2(t,r).compare(new E2(e,r));m2.exports=lme});var SI=w((vet,I2)=>{var cme=ws(),ume=(t,e,r)=>cme(t,e,r)===0;I2.exports=ume});var B2=w((ket,y2)=>{var w2=Ec(),gme=SI(),fme=(t,e)=>{if(gme(t,e))return null;{let r=w2(t),i=w2(e),n=r.prerelease.length||i.prerelease.length,s=n?"pre":"",o=n?"prerelease":"";for(let a in r)if((a==="major"||a==="minor"||a==="patch")&&r[a]!==i[a])return s+a;return o}};y2.exports=fme});var Q2=w((xet,b2)=>{var hme=Hi(),pme=(t,e)=>new hme(t,e).major;b2.exports=pme});var S2=w((Pet,v2)=>{var dme=Hi(),Cme=(t,e)=>new dme(t,e).minor;v2.exports=Cme});var x2=w((Det,k2)=>{var mme=Hi(),Eme=(t,e)=>new mme(t,e).patch;k2.exports=Eme});var D2=w((Ret,P2)=>{var Ime=Ec(),yme=(t,e)=>{let r=Ime(t,e);return r&&r.prerelease.length?r.prerelease:null};P2.exports=yme});var F2=w((Fet,R2)=>{var wme=ws(),Bme=(t,e,r)=>wme(e,t,r);R2.exports=Bme});var L2=w((Net,N2)=>{var bme=ws(),Qme=(t,e)=>bme(t,e,!0);N2.exports=Qme});var kI=w((Let,T2)=>{var O2=Hi(),vme=(t,e,r)=>{let i=new O2(t,r),n=new O2(e,r);return i.compare(n)||i.compareBuild(n)};T2.exports=vme});var U2=w((Tet,M2)=>{var Sme=kI(),kme=(t,e)=>t.sort((r,i)=>Sme(r,i,e));M2.exports=kme});var H2=w((Oet,K2)=>{var xme=kI(),Pme=(t,e)=>t.sort((r,i)=>xme(i,r,e));K2.exports=Pme});var wp=w((Met,j2)=>{var Dme=ws(),Rme=(t,e,r)=>Dme(t,e,r)>0;j2.exports=Rme});var xI=w((Uet,G2)=>{var Fme=ws(),Nme=(t,e,r)=>Fme(t,e,r)<0;G2.exports=Nme});var mv=w((Ket,Y2)=>{var Lme=ws(),Tme=(t,e,r)=>Lme(t,e,r)!==0;Y2.exports=Tme});var PI=w((Het,q2)=>{var Ome=ws(),Mme=(t,e,r)=>Ome(t,e,r)>=0;q2.exports=Mme});var DI=w((jet,J2)=>{var Ume=ws(),Kme=(t,e,r)=>Ume(t,e,r)<=0;J2.exports=Kme});var Ev=w((Get,W2)=>{var Hme=SI(),jme=mv(),Gme=wp(),Yme=PI(),qme=xI(),Jme=DI(),Wme=(t,e,r,i)=>{switch(e){case"===":return typeof t=="object"&&(t=t.version),typeof 
r=="object"&&(r=r.version),t===r;case"!==":return typeof t=="object"&&(t=t.version),typeof r=="object"&&(r=r.version),t!==r;case"":case"=":case"==":return Hme(t,r,i);case"!=":return jme(t,r,i);case">":return Gme(t,r,i);case">=":return Yme(t,r,i);case"<":return qme(t,r,i);case"<=":return Jme(t,r,i);default:throw new TypeError(`Invalid operator: ${e}`)}};W2.exports=Wme});var _2=w((Yet,z2)=>{var zme=Hi(),_me=Ec(),{re:RI,t:FI}=mc(),Vme=(t,e)=>{if(t instanceof zme)return t;if(typeof t=="number"&&(t=String(t)),typeof t!="string")return null;e=e||{};let r=null;if(!e.rtl)r=t.match(RI[FI.COERCE]);else{let i;for(;(i=RI[FI.COERCERTL].exec(t))&&(!r||r.index+r[0].length!==t.length);)(!r||i.index+i[0].length!==r.index+r[0].length)&&(r=i),RI[FI.COERCERTL].lastIndex=i.index+i[1].length+i[2].length;RI[FI.COERCERTL].lastIndex=-1}return r===null?null:_me(`${r[2]}.${r[3]||"0"}.${r[4]||"0"}`,e)};z2.exports=Vme});var X2=w((qet,V2)=>{"use strict";V2.exports=function(t){t.prototype[Symbol.iterator]=function*(){for(let e=this.head;e;e=e.next)yield e.value}}});var Bp=w((Jet,Z2)=>{"use strict";Z2.exports=Gt;Gt.Node=Ic;Gt.create=Gt;function Gt(t){var e=this;if(e instanceof Gt||(e=new Gt),e.tail=null,e.head=null,e.length=0,t&&typeof t.forEach=="function")t.forEach(function(n){e.push(n)});else if(arguments.length>0)for(var r=0,i=arguments.length;r1)r=e;else if(this.head)i=this.head.next,r=this.head.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=0;i!==null;n++)r=t(r,i.value,n),i=i.next;return r};Gt.prototype.reduceReverse=function(t,e){var r,i=this.tail;if(arguments.length>1)r=e;else if(this.tail)i=this.tail.prev,r=this.tail.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=this.length-1;i!==null;n--)r=t(r,i.value,n),i=i.prev;return r};Gt.prototype.toArray=function(){for(var t=new Array(this.length),e=0,r=this.head;r!==null;e++)t[e]=r.value,r=r.next;return t};Gt.prototype.toArrayReverse=function(){for(var t=new 
Array(this.length),e=0,r=this.tail;r!==null;e++)t[e]=r.value,r=r.prev;return t};Gt.prototype.slice=function(t,e){e=e||this.length,e<0&&(e+=this.length),t=t||0,t<0&&(t+=this.length);var r=new Gt;if(ethis.length&&(e=this.length);for(var i=0,n=this.head;n!==null&&ithis.length&&(e=this.length);for(var i=this.length,n=this.tail;n!==null&&i>e;i--)n=n.prev;for(;n!==null&&i>t;i--,n=n.prev)r.push(n.value);return r};Gt.prototype.splice=function(t,e,...r){t>this.length&&(t=this.length-1),t<0&&(t=this.length+t);for(var i=0,n=this.head;n!==null&&i{"use strict";var eEe=Bp(),yc=Symbol("max"),Ta=Symbol("length"),Ag=Symbol("lengthCalculator"),bp=Symbol("allowStale"),wc=Symbol("maxAge"),Oa=Symbol("dispose"),eH=Symbol("noDisposeOnSet"),Ii=Symbol("lruList"),no=Symbol("cache"),tH=Symbol("updateAgeOnGet"),Iv=()=>1,rH=class{constructor(e){if(typeof e=="number"&&(e={max:e}),e||(e={}),e.max&&(typeof e.max!="number"||e.max<0))throw new TypeError("max must be a non-negative number");let r=this[yc]=e.max||Infinity,i=e.length||Iv;if(this[Ag]=typeof i!="function"?Iv:i,this[bp]=e.stale||!1,e.maxAge&&typeof e.maxAge!="number")throw new TypeError("maxAge must be a number");this[wc]=e.maxAge||0,this[Oa]=e.dispose,this[eH]=e.noDisposeOnSet||!1,this[tH]=e.updateAgeOnGet||!1,this.reset()}set max(e){if(typeof e!="number"||e<0)throw new TypeError("max must be a non-negative number");this[yc]=e||Infinity,Qp(this)}get max(){return this[yc]}set allowStale(e){this[bp]=!!e}get allowStale(){return this[bp]}set maxAge(e){if(typeof e!="number")throw new TypeError("maxAge must be a non-negative number");this[wc]=e,Qp(this)}get maxAge(){return this[wc]}set lengthCalculator(e){typeof e!="function"&&(e=Iv),e!==this[Ag]&&(this[Ag]=e,this[Ta]=0,this[Ii].forEach(r=>{r.length=this[Ag](r.value,r.key),this[Ta]+=r.length})),Qp(this)}get lengthCalculator(){return this[Ag]}get length(){return this[Ta]}get itemCount(){return this[Ii].length}rforEach(e,r){r=r||this;for(let i=this[Ii].tail;i!==null;){let 
n=i.prev;nH(this,e,i,r),i=n}}forEach(e,r){r=r||this;for(let i=this[Ii].head;i!==null;){let n=i.next;nH(this,e,i,r),i=n}}keys(){return this[Ii].toArray().map(e=>e.key)}values(){return this[Ii].toArray().map(e=>e.value)}reset(){this[Oa]&&this[Ii]&&this[Ii].length&&this[Ii].forEach(e=>this[Oa](e.key,e.value)),this[no]=new Map,this[Ii]=new eEe,this[Ta]=0}dump(){return this[Ii].map(e=>NI(this,e)?!1:{k:e.key,v:e.value,e:e.now+(e.maxAge||0)}).toArray().filter(e=>e)}dumpLru(){return this[Ii]}set(e,r,i){if(i=i||this[wc],i&&typeof i!="number")throw new TypeError("maxAge must be a number");let n=i?Date.now():0,s=this[Ag](r,e);if(this[no].has(e)){if(s>this[yc])return lg(this,this[no].get(e)),!1;let l=this[no].get(e).value;return this[Oa]&&(this[eH]||this[Oa](e,l.value)),l.now=n,l.maxAge=i,l.value=r,this[Ta]+=s-l.length,l.length=s,this.get(e),Qp(this),!0}let o=new iH(e,r,s,n,i);return o.length>this[yc]?(this[Oa]&&this[Oa](e,r),!1):(this[Ta]+=o.length,this[Ii].unshift(o),this[no].set(e,this[Ii].head),Qp(this),!0)}has(e){if(!this[no].has(e))return!1;let r=this[no].get(e).value;return!NI(this,r)}get(e){return yv(this,e,!0)}peek(e){return yv(this,e,!1)}pop(){let e=this[Ii].tail;return e?(lg(this,e),e.value):null}del(e){lg(this,this[no].get(e))}load(e){this.reset();let r=Date.now();for(let i=e.length-1;i>=0;i--){let n=e[i],s=n.e||0;if(s===0)this.set(n.k,n.v);else{let o=s-r;o>0&&this.set(n.k,n.v,o)}}}prune(){this[no].forEach((e,r)=>yv(this,r,!1))}},yv=(t,e,r)=>{let i=t[no].get(e);if(i){let n=i.value;if(NI(t,n)){if(lg(t,i),!t[bp])return}else r&&(t[tH]&&(i.value.now=Date.now()),t[Ii].unshiftNode(i));return n.value}},NI=(t,e)=>{if(!e||!e.maxAge&&!t[wc])return!1;let r=Date.now()-e.now;return e.maxAge?r>e.maxAge:t[wc]&&r>t[wc]},Qp=t=>{if(t[Ta]>t[yc])for(let e=t[Ii].tail;t[Ta]>t[yc]&&e!==null;){let r=e.prev;lg(t,e),e=r}},lg=(t,e)=>{if(e){let 
r=e.value;t[Oa]&&t[Oa](r.key,r.value),t[Ta]-=r.length,t[no].delete(r.key),t[Ii].removeNode(e)}},iH=class{constructor(e,r,i,n,s){this.key=e,this.value=r,this.length=i,this.now=n,this.maxAge=s||0}},nH=(t,e,r,i)=>{let n=r.value;NI(t,n)&&(lg(t,r),t[bp]||(n=void 0)),n&&e.call(i,n.value,n.key,t)};$2.exports=rH});var Bs=w((zet,oH)=>{var cg=class{constructor(e,r){if(r=tEe(r),e instanceof cg)return e.loose===!!r.loose&&e.includePrerelease===!!r.includePrerelease?e:new cg(e.raw,r);if(e instanceof wv)return this.raw=e.value,this.set=[[e]],this.format(),this;if(this.options=r,this.loose=!!r.loose,this.includePrerelease=!!r.includePrerelease,this.raw=e,this.set=e.split(/\s*\|\|\s*/).map(i=>this.parseRange(i.trim())).filter(i=>i.length),!this.set.length)throw new TypeError(`Invalid SemVer Range: ${e}`);if(this.set.length>1){let i=this.set[0];if(this.set=this.set.filter(n=>!AH(n[0])),this.set.length===0)this.set=[i];else if(this.set.length>1){for(let n of this.set)if(n.length===1&&oEe(n[0])){this.set=[n];break}}}this.format()}format(){return this.range=this.set.map(e=>e.join(" ").trim()).join("||").trim(),this.range}toString(){return this.range}parseRange(e){e=e.trim();let i=`parseRange:${Object.keys(this.options).join(",")}:${e}`,n=aH.get(i);if(n)return n;let s=this.options.loose,o=s?ji[ki.HYPHENRANGELOOSE]:ji[ki.HYPHENRANGE];e=e.replace(o,lEe(this.options.includePrerelease)),Wr("hyphen replace",e),e=e.replace(ji[ki.COMPARATORTRIM],iEe),Wr("comparator trim",e,ji[ki.COMPARATORTRIM]),e=e.replace(ji[ki.TILDETRIM],nEe),e=e.replace(ji[ki.CARETTRIM],sEe),e=e.split(/\s+/).join(" ");let a=s?ji[ki.COMPARATORLOOSE]:ji[ki.COMPARATOR],l=e.split(" ").map(f=>aEe(f,this.options)).join(" ").split(/\s+/).map(f=>AEe(f,this.options)).filter(this.options.loose?f=>!!f.match(a):()=>!0).map(f=>new wv(f,this.options)),c=l.length,u=new Map;for(let f of l){if(AH(f))return[f];u.set(f.value,f)}u.size>1&&u.has("")&&u.delete("");let g=[...u.values()];return aH.set(i,g),g}intersects(e,r){if(!(e instanceof 
cg))throw new TypeError("a Range is required");return this.set.some(i=>lH(i,r)&&e.set.some(n=>lH(n,r)&&i.every(s=>n.every(o=>s.intersects(o,r)))))}test(e){if(!e)return!1;if(typeof e=="string")try{e=new rEe(e,this.options)}catch(r){return!1}for(let r=0;rt.value==="<0.0.0-0",oEe=t=>t.value==="",lH=(t,e)=>{let r=!0,i=t.slice(),n=i.pop();for(;r&&i.length;)r=i.every(s=>n.intersects(s,e)),n=i.pop();return r},aEe=(t,e)=>(Wr("comp",t,e),t=fEe(t,e),Wr("caret",t),t=gEe(t,e),Wr("tildes",t),t=hEe(t,e),Wr("xrange",t),t=pEe(t,e),Wr("stars",t),t),on=t=>!t||t.toLowerCase()==="x"||t==="*",gEe=(t,e)=>t.trim().split(/\s+/).map(r=>dEe(r,e)).join(" "),dEe=(t,e)=>{let r=e.loose?ji[ki.TILDELOOSE]:ji[ki.TILDE];return t.replace(r,(i,n,s,o,a)=>{Wr("tilde",t,i,n,s,o,a);let l;return on(n)?l="":on(s)?l=`>=${n}.0.0 <${+n+1}.0.0-0`:on(o)?l=`>=${n}.${s}.0 <${n}.${+s+1}.0-0`:a?(Wr("replaceTilde pr",a),l=`>=${n}.${s}.${o}-${a} <${n}.${+s+1}.0-0`):l=`>=${n}.${s}.${o} <${n}.${+s+1}.0-0`,Wr("tilde return",l),l})},fEe=(t,e)=>t.trim().split(/\s+/).map(r=>CEe(r,e)).join(" "),CEe=(t,e)=>{Wr("caret",t,e);let r=e.loose?ji[ki.CARETLOOSE]:ji[ki.CARET],i=e.includePrerelease?"-0":"";return t.replace(r,(n,s,o,a,l)=>{Wr("caret",t,n,s,o,a,l);let c;return on(s)?c="":on(o)?c=`>=${s}.0.0${i} <${+s+1}.0.0-0`:on(a)?s==="0"?c=`>=${s}.${o}.0${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.0${i} <${+s+1}.0.0-0`:l?(Wr("replaceCaret pr",l),s==="0"?o==="0"?c=`>=${s}.${o}.${a}-${l} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}-${l} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a}-${l} <${+s+1}.0.0-0`):(Wr("no pr"),s==="0"?o==="0"?c=`>=${s}.${o}.${a}${i} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a} <${+s+1}.0.0-0`),Wr("caret return",c),c})},hEe=(t,e)=>(Wr("replaceXRanges",t,e),t.split(/\s+/).map(r=>mEe(r,e)).join(" ")),mEe=(t,e)=>{t=t.trim();let r=e.loose?ji[ki.XRANGELOOSE]:ji[ki.XRANGE];return t.replace(r,(i,n,s,o,a,l)=>{Wr("xRange",t,i,n,s,o,a,l);let c=on(s),u=c||on(o),g=u||on(a),f=g;return 
n==="="&&f&&(n=""),l=e.includePrerelease?"-0":"",c?n===">"||n==="<"?i="<0.0.0-0":i="*":n&&f?(u&&(o=0),a=0,n===">"?(n=">=",u?(s=+s+1,o=0,a=0):(o=+o+1,a=0)):n==="<="&&(n="<",u?s=+s+1:o=+o+1),n==="<"&&(l="-0"),i=`${n+s}.${o}.${a}${l}`):u?i=`>=${s}.0.0${l} <${+s+1}.0.0-0`:g&&(i=`>=${s}.${o}.0${l} <${s}.${+o+1}.0-0`),Wr("xRange return",i),i})},pEe=(t,e)=>(Wr("replaceStars",t,e),t.trim().replace(ji[ki.STAR],"")),AEe=(t,e)=>(Wr("replaceGTE0",t,e),t.trim().replace(ji[e.includePrerelease?ki.GTE0PRE:ki.GTE0],"")),lEe=t=>(e,r,i,n,s,o,a,l,c,u,g,f,h)=>(on(i)?r="":on(n)?r=`>=${i}.0.0${t?"-0":""}`:on(s)?r=`>=${i}.${n}.0${t?"-0":""}`:o?r=`>=${r}`:r=`>=${r}${t?"-0":""}`,on(c)?l="":on(u)?l=`<${+c+1}.0.0-0`:on(g)?l=`<${c}.${+u+1}.0-0`:f?l=`<=${c}.${u}.${g}-${f}`:t?l=`<${c}.${u}.${+g+1}-0`:l=`<=${l}`,`${r} ${l}`.trim()),cEe=(t,e,r)=>{for(let i=0;i0){let n=t[i].semver;if(n.major===e.major&&n.minor===e.minor&&n.patch===e.patch)return!0}return!1}return!0}});var vp=w((_et,cH)=>{var Sp=Symbol("SemVer ANY"),kp=class{static get ANY(){return Sp}constructor(e,r){if(r=EEe(r),e instanceof kp){if(e.loose===!!r.loose)return e;e=e.value}bv("comparator",e,r),this.options=r,this.loose=!!r.loose,this.parse(e),this.semver===Sp?this.value="":this.value=this.operator+this.semver.version,bv("comp",this)}parse(e){let r=this.options.loose?uH[gH.COMPARATORLOOSE]:uH[gH.COMPARATOR],i=e.match(r);if(!i)throw new TypeError(`Invalid comparator: ${e}`);this.operator=i[1]!==void 0?i[1]:"",this.operator==="="&&(this.operator=""),i[2]?this.semver=new fH(i[2],this.options.loose):this.semver=Sp}toString(){return this.value}test(e){if(bv("Comparator.test",e,this.options.loose),this.semver===Sp||e===Sp)return!0;if(typeof e=="string")try{e=new fH(e,this.options)}catch(r){return!1}return Bv(e,this.operator,this.semver,this.options)}intersects(e,r){if(!(e instanceof kp))throw new TypeError("a Comparator is required");if((!r||typeof r!="object")&&(r={loose:!!r,includePrerelease:!1}),this.operator==="")return 
this.value===""?!0:new hH(e.value,r).test(this.value);if(e.operator==="")return e.value===""?!0:new hH(this.value,r).test(e.semver);let i=(this.operator===">="||this.operator===">")&&(e.operator===">="||e.operator===">"),n=(this.operator==="<="||this.operator==="<")&&(e.operator==="<="||e.operator==="<"),s=this.semver.version===e.semver.version,o=(this.operator===">="||this.operator==="<=")&&(e.operator===">="||e.operator==="<="),a=Bv(this.semver,"<",e.semver,r)&&(this.operator===">="||this.operator===">")&&(e.operator==="<="||e.operator==="<"),l=Bv(this.semver,">",e.semver,r)&&(this.operator==="<="||this.operator==="<")&&(e.operator===">="||e.operator===">");return i||n||s&&o||a||l}};cH.exports=kp;var EEe=Ip(),{re:uH,t:gH}=mc(),Bv=Ev(),bv=Ep(),fH=Hi(),hH=Bs()});var xp=w((Vet,pH)=>{var IEe=Bs(),yEe=(t,e,r)=>{try{e=new IEe(e,r)}catch(i){return!1}return e.test(t)};pH.exports=yEe});var CH=w((Xet,dH)=>{var wEe=Bs(),BEe=(t,e)=>new wEe(t,e).set.map(r=>r.map(i=>i.value).join(" ").trim().split(" "));dH.exports=BEe});var EH=w((Zet,mH)=>{var bEe=Hi(),QEe=Bs(),vEe=(t,e,r)=>{let i=null,n=null,s=null;try{s=new QEe(e,r)}catch(o){return null}return t.forEach(o=>{s.test(o)&&(!i||n.compare(o)===-1)&&(i=o,n=new bEe(i,r))}),i};mH.exports=vEe});var yH=w(($et,IH)=>{var SEe=Hi(),kEe=Bs(),xEe=(t,e,r)=>{let i=null,n=null,s=null;try{s=new kEe(e,r)}catch(o){return null}return t.forEach(o=>{s.test(o)&&(!i||n.compare(o)===1)&&(i=o,n=new SEe(i,r))}),i};IH.exports=xEe});var bH=w((ett,wH)=>{var Qv=Hi(),PEe=Bs(),BH=wp(),DEe=(t,e)=>{t=new PEe(t,e);let r=new Qv("0.0.0");if(t.test(r)||(r=new Qv("0.0.0-0"),t.test(r)))return r;r=null;for(let i=0;i{let a=new Qv(o.semver.version);switch(o.operator){case">":a.prerelease.length===0?a.patch++:a.prerelease.push(0),a.raw=a.format();case"":case">=":(!s||BH(a,s))&&(s=a);break;case"<":case"<=":break;default:throw new Error(`Unexpected operation: ${o.operator}`)}}),s&&(!r||BH(r,s))&&(r=s)}return r&&t.test(r)?r:null};wH.exports=DEe});var vH=w((ttt,QH)=>{var 
REe=Bs(),FEe=(t,e)=>{try{return new REe(t,e).range||"*"}catch(r){return null}};QH.exports=FEe});var LI=w((rtt,SH)=>{var NEe=Hi(),kH=vp(),{ANY:LEe}=kH,TEe=Bs(),OEe=xp(),xH=wp(),PH=xI(),MEe=DI(),UEe=PI(),KEe=(t,e,r,i)=>{t=new NEe(t,i),e=new TEe(e,i);let n,s,o,a,l;switch(r){case">":n=xH,s=MEe,o=PH,a=">",l=">=";break;case"<":n=PH,s=UEe,o=xH,a="<",l="<=";break;default:throw new TypeError('Must provide a hilo val of "<" or ">"')}if(OEe(t,e,i))return!1;for(let c=0;c{h.semver===LEe&&(h=new kH(">=0.0.0")),g=g||h,f=f||h,n(h.semver,g.semver,i)?g=h:o(h.semver,f.semver,i)&&(f=h)}),g.operator===a||g.operator===l||(!f.operator||f.operator===a)&&s(t,f.semver))return!1;if(f.operator===l&&o(t,f.semver))return!1}return!0};SH.exports=KEe});var RH=w((itt,DH)=>{var HEe=LI(),jEe=(t,e,r)=>HEe(t,e,">",r);DH.exports=jEe});var NH=w((ntt,FH)=>{var GEe=LI(),YEe=(t,e,r)=>GEe(t,e,"<",r);FH.exports=YEe});var OH=w((stt,LH)=>{var TH=Bs(),qEe=(t,e,r)=>(t=new TH(t,r),e=new TH(e,r),t.intersects(e));LH.exports=qEe});var UH=w((ott,MH)=>{var JEe=xp(),WEe=ws();MH.exports=(t,e,r)=>{let i=[],n=null,s=null,o=t.sort((u,g)=>WEe(u,g,r));for(let u of o)JEe(u,e,r)?(s=u,n||(n=u)):(s&&i.push([n,s]),s=null,n=null);n&&i.push([n,null]);let a=[];for(let[u,g]of i)u===g?a.push(u):!g&&u===o[0]?a.push("*"):g?u===o[0]?a.push(`<=${g}`):a.push(`${u} - ${g}`):a.push(`>=${u}`);let l=a.join(" || "),c=typeof e.raw=="string"?e.raw:String(e);return l.length{var HH=Bs(),TI=vp(),{ANY:vv}=TI,Pp=xp(),Sv=ws(),_Ee=(t,e,r={})=>{if(t===e)return!0;t=new HH(t,r),e=new HH(e,r);let i=!1;e:for(let n of t.set){for(let s of e.set){let o=zEe(n,s,r);if(i=i||o!==null,o)continue e}if(i)return!1}return!0},zEe=(t,e,r)=>{if(t===e)return!0;if(t.length===1&&t[0].semver===vv){if(e.length===1&&e[0].semver===vv)return!0;r.includePrerelease?t=[new TI(">=0.0.0-0")]:t=[new TI(">=0.0.0")]}if(e.length===1&&e[0].semver===vv){if(r.includePrerelease)return!0;e=[new TI(">=0.0.0")]}let i=new Set,n,s;for(let h of 
t)h.operator===">"||h.operator===">="?n=jH(n,h,r):h.operator==="<"||h.operator==="<="?s=GH(s,h,r):i.add(h.semver);if(i.size>1)return null;let o;if(n&&s){if(o=Sv(n.semver,s.semver,r),o>0)return null;if(o===0&&(n.operator!==">="||s.operator!=="<="))return null}for(let h of i){if(n&&!Pp(h,String(n),r)||s&&!Pp(h,String(s),r))return null;for(let p of e)if(!Pp(h,String(p),r))return!1;return!0}let a,l,c,u,g=s&&!r.includePrerelease&&s.semver.prerelease.length?s.semver:!1,f=n&&!r.includePrerelease&&n.semver.prerelease.length?n.semver:!1;g&&g.prerelease.length===1&&s.operator==="<"&&g.prerelease[0]===0&&(g=!1);for(let h of e){if(u=u||h.operator===">"||h.operator===">=",c=c||h.operator==="<"||h.operator==="<=",n){if(f&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===f.major&&h.semver.minor===f.minor&&h.semver.patch===f.patch&&(f=!1),h.operator===">"||h.operator===">="){if(a=jH(n,h,r),a===h&&a!==n)return!1}else if(n.operator===">="&&!Pp(n.semver,String(h),r))return!1}if(s){if(g&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===g.major&&h.semver.minor===g.minor&&h.semver.patch===g.patch&&(g=!1),h.operator==="<"||h.operator==="<="){if(l=GH(s,h,r),l===h&&l!==s)return!1}else if(s.operator==="<="&&!Pp(s.semver,String(h),r))return!1}if(!h.operator&&(s||n)&&o!==0)return!1}return!(n&&c&&!s&&o!==0||s&&u&&!n&&o!==0||f||g)},jH=(t,e,r)=>{if(!t)return e;let i=Sv(t.semver,e.semver,r);return i>0?t:i<0||e.operator===">"&&t.operator===">="?e:t},GH=(t,e,r)=>{if(!t)return e;let i=Sv(t.semver,e.semver,r);return i<0?t:i>0||e.operator==="<"&&t.operator==="<="?e:t};KH.exports=_Ee});var ti=w((Att,qH)=>{var 
kv=mc();qH.exports={re:kv.re,src:kv.src,tokens:kv.t,SEMVER_SPEC_VERSION:mp().SEMVER_SPEC_VERSION,SemVer:Hi(),compareIdentifiers:bI().compareIdentifiers,rcompareIdentifiers:bI().rcompareIdentifiers,parse:Ec(),valid:f2(),clean:p2(),inc:C2(),diff:B2(),major:Q2(),minor:S2(),patch:x2(),prerelease:D2(),compare:ws(),rcompare:F2(),compareLoose:L2(),compareBuild:kI(),sort:U2(),rsort:H2(),gt:wp(),lt:xI(),eq:SI(),neq:mv(),gte:PI(),lte:DI(),cmp:Ev(),coerce:_2(),Comparator:vp(),Range:Bs(),satisfies:xp(),toComparators:CH(),maxSatisfying:EH(),minSatisfying:yH(),minVersion:bH(),validRange:vH(),outside:LI(),gtr:RH(),ltr:NH(),intersects:OH(),simplifyRange:UH(),subset:YH()}});var xv=w(OI=>{"use strict";Object.defineProperty(OI,"__esModule",{value:!0});OI.VERSION=void 0;OI.VERSION="9.1.0"});var Yt=w((exports,module)=>{"use strict";var __spreadArray=exports&&exports.__spreadArray||function(t,e,r){if(r||arguments.length===2)for(var i=0,n=e.length,s;i{(function(t,e){typeof define=="function"&&define.amd?define([],e):typeof MI=="object"&&MI.exports?MI.exports=e():t.regexpToAst=e()})(typeof self!="undefined"?self:JH,function(){function t(){}t.prototype.saveState=function(){return{idx:this.idx,input:this.input,groupIdx:this.groupIdx}},t.prototype.restoreState=function(p){this.idx=p.idx,this.input=p.input,this.groupIdx=p.groupIdx},t.prototype.pattern=function(p){this.idx=0,this.input=p,this.groupIdx=0,this.consumeChar("/");var m=this.disjunction();this.consumeChar("/");for(var y={type:"Flags",loc:{begin:this.idx,end:p.length},global:!1,ignoreCase:!1,multiLine:!1,unicode:!1,sticky:!1};this.isRegExpFlag();)switch(this.popChar()){case"g":o(y,"global");break;case"i":o(y,"ignoreCase");break;case"m":o(y,"multiLine");break;case"u":o(y,"unicode");break;case"y":o(y,"sticky");break}if(this.idx!==this.input.length)throw Error("Redundant input: "+this.input.substring(this.idx));return{type:"Pattern",flags:y,value:m,loc:this.loc(0)}},t.prototype.disjunction=function(){var 
p=[],m=this.idx;for(p.push(this.alternative());this.peekChar()==="|";)this.consumeChar("|"),p.push(this.alternative());return{type:"Disjunction",value:p,loc:this.loc(m)}},t.prototype.alternative=function(){for(var p=[],m=this.idx;this.isTerm();)p.push(this.term());return{type:"Alternative",value:p,loc:this.loc(m)}},t.prototype.term=function(){return this.isAssertion()?this.assertion():this.atom()},t.prototype.assertion=function(){var p=this.idx;switch(this.popChar()){case"^":return{type:"StartAnchor",loc:this.loc(p)};case"$":return{type:"EndAnchor",loc:this.loc(p)};case"\\":switch(this.popChar()){case"b":return{type:"WordBoundary",loc:this.loc(p)};case"B":return{type:"NonWordBoundary",loc:this.loc(p)}}throw Error("Invalid Assertion Escape");case"(":this.consumeChar("?");var m;switch(this.popChar()){case"=":m="Lookahead";break;case"!":m="NegativeLookahead";break}a(m);var y=this.disjunction();return this.consumeChar(")"),{type:m,value:y,loc:this.loc(p)}}l()},t.prototype.quantifier=function(p){var m,y=this.idx;switch(this.popChar()){case"*":m={atLeast:0,atMost:Infinity};break;case"+":m={atLeast:1,atMost:Infinity};break;case"?":m={atLeast:0,atMost:1};break;case"{":var Q=this.integerIncludingZero();switch(this.popChar()){case"}":m={atLeast:Q,atMost:Q};break;case",":var S;this.isDigit()?(S=this.integerIncludingZero(),m={atLeast:Q,atMost:S}):m={atLeast:Q,atMost:Infinity},this.consumeChar("}");break}if(p===!0&&m===void 0)return;a(m);break}if(!(p===!0&&m===void 0))return a(m),this.peekChar(0)==="?"?(this.consumeChar("?"),m.greedy=!1):m.greedy=!0,m.type="Quantifier",m.loc=this.loc(y),m},t.prototype.atom=function(){var p,m=this.idx;switch(this.peekChar()){case".":p=this.dotAll();break;case"\\":p=this.atomEscape();break;case"[":p=this.characterClass();break;case"(":p=this.group();break}return p===void 0&&this.isPatternCharacter()&&(p=this.patternCharacter()),a(p),p.loc=this.loc(m),this.isQuantifier()&&(p.quantifier=this.quantifier()),p},t.prototype.dotAll=function(){return 
this.consumeChar("."),{type:"Set",complement:!0,value:[n(`
+`),n("\r"),n("\u2028"),n("\u2029")]}},t.prototype.atomEscape=function(){switch(this.consumeChar("\\"),this.peekChar()){case"1":case"2":case"3":case"4":case"5":case"6":case"7":case"8":case"9":return this.decimalEscapeAtom();case"d":case"D":case"s":case"S":case"w":case"W":return this.characterClassEscape();case"f":case"n":case"r":case"t":case"v":return this.controlEscapeAtom();case"c":return this.controlLetterEscapeAtom();case"0":return this.nulCharacterAtom();case"x":return this.hexEscapeSequenceAtom();case"u":return this.regExpUnicodeEscapeSequenceAtom();default:return this.identityEscapeAtom()}},t.prototype.decimalEscapeAtom=function(){var p=this.positiveInteger();return{type:"GroupBackReference",value:p}},t.prototype.characterClassEscape=function(){var p,m=!1;switch(this.popChar()){case"d":p=u;break;case"D":p=u,m=!0;break;case"s":p=f;break;case"S":p=f,m=!0;break;case"w":p=g;break;case"W":p=g,m=!0;break}return a(p),{type:"Set",value:p,complement:m}},t.prototype.controlEscapeAtom=function(){var p;switch(this.popChar()){case"f":p=n("\f");break;case"n":p=n(`
+`);break;case"r":p=n("\r");break;case"t":p=n(" ");break;case"v":p=n("\v");break}return a(p),{type:"Character",value:p}},t.prototype.controlLetterEscapeAtom=function(){this.consumeChar("c");var p=this.popChar();if(/[a-zA-Z]/.test(p)===!1)throw Error("Invalid ");var m=p.toUpperCase().charCodeAt(0)-64;return{type:"Character",value:m}},t.prototype.nulCharacterAtom=function(){return this.consumeChar("0"),{type:"Character",value:n("\0")}},t.prototype.hexEscapeSequenceAtom=function(){return this.consumeChar("x"),this.parseHexDigits(2)},t.prototype.regExpUnicodeEscapeSequenceAtom=function(){return this.consumeChar("u"),this.parseHexDigits(4)},t.prototype.identityEscapeAtom=function(){var p=this.popChar();return{type:"Character",value:n(p)}},t.prototype.classPatternCharacterAtom=function(){switch(this.peekChar()){case`
+`:case"\r":case"\u2028":case"\u2029":case"\\":case"]":throw Error("TBD");default:var p=this.popChar();return{type:"Character",value:n(p)}}},t.prototype.characterClass=function(){var p=[],m=!1;for(this.consumeChar("["),this.peekChar(0)==="^"&&(this.consumeChar("^"),m=!0);this.isClassAtom();){var y=this.classAtom(),Q=y.type==="Character";if(Q&&this.isRangeDash()){this.consumeChar("-");var S=this.classAtom(),x=S.type==="Character";if(x){if(S.value=this.input.length)throw Error("Unexpected end of input");this.idx++},t.prototype.loc=function(p){return{begin:p,end:this.idx}};var e=/[0-9a-fA-F]/,r=/[0-9]/,i=/[1-9]/;function n(p){return p.charCodeAt(0)}function s(p,m){p.length!==void 0?p.forEach(function(y){m.push(y)}):m.push(p)}function o(p,m){if(p[m]===!0)throw"duplicate flag "+m;p[m]=!0}function a(p){if(p===void 0)throw Error("Internal Error - Should never get here!")}function l(){throw Error("Internal Error - Should never get here!")}var c,u=[];for(c=n("0");c<=n("9");c++)u.push(c);var g=[n("_")].concat(u);for(c=n("a");c<=n("z");c++)g.push(c);for(c=n("A");c<=n("Z");c++)g.push(c);var f=[n(" "),n("\f"),n(`
+`),n("\r"),n(" "),n("\v"),n(" "),n("\xA0"),n("\u1680"),n("\u2000"),n("\u2001"),n("\u2002"),n("\u2003"),n("\u2004"),n("\u2005"),n("\u2006"),n("\u2007"),n("\u2008"),n("\u2009"),n("\u200A"),n("\u2028"),n("\u2029"),n("\u202F"),n("\u205F"),n("\u3000"),n("\uFEFF")];function h(){}return h.prototype.visitChildren=function(p){for(var m in p){var y=p[m];p.hasOwnProperty(m)&&(y.type!==void 0?this.visit(y):Array.isArray(y)&&y.forEach(function(Q){this.visit(Q)},this))}},h.prototype.visit=function(p){switch(p.type){case"Pattern":this.visitPattern(p);break;case"Flags":this.visitFlags(p);break;case"Disjunction":this.visitDisjunction(p);break;case"Alternative":this.visitAlternative(p);break;case"StartAnchor":this.visitStartAnchor(p);break;case"EndAnchor":this.visitEndAnchor(p);break;case"WordBoundary":this.visitWordBoundary(p);break;case"NonWordBoundary":this.visitNonWordBoundary(p);break;case"Lookahead":this.visitLookahead(p);break;case"NegativeLookahead":this.visitNegativeLookahead(p);break;case"Character":this.visitCharacter(p);break;case"Set":this.visitSet(p);break;case"Group":this.visitGroup(p);break;case"GroupBackReference":this.visitGroupBackReference(p);break;case"Quantifier":this.visitQuantifier(p);break}this.visitChildren(p)},h.prototype.visitPattern=function(p){},h.prototype.visitFlags=function(p){},h.prototype.visitDisjunction=function(p){},h.prototype.visitAlternative=function(p){},h.prototype.visitStartAnchor=function(p){},h.prototype.visitEndAnchor=function(p){},h.prototype.visitWordBoundary=function(p){},h.prototype.visitNonWordBoundary=function(p){},h.prototype.visitLookahead=function(p){},h.prototype.visitNegativeLookahead=function(p){},h.prototype.visitCharacter=function(p){},h.prototype.visitSet=function(p){},h.prototype.visitGroup=function(p){},h.prototype.visitGroupBackReference=function(p){},h.prototype.visitQuantifier=function(p){},{RegExpParser:t,BaseRegExpVisitor:h,VERSION:"0.5.0"}})});var HI=w(ug=>{"use 
strict";Object.defineProperty(ug,"__esModule",{value:!0});ug.clearRegExpParserCache=ug.getRegExpAst=void 0;var VEe=UI(),KI={},XEe=new VEe.RegExpParser;function ZEe(t){var e=t.toString();if(KI.hasOwnProperty(e))return KI[e];var r=XEe.pattern(e);return KI[e]=r,r}ug.getRegExpAst=ZEe;function $Ee(){KI={}}ug.clearRegExpParserCache=$Ee});var XH=w(Bn=>{"use strict";var eIe=Bn&&Bn.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Bn,"__esModule",{value:!0});Bn.canMatchCharCode=Bn.firstCharOptimizedIndices=Bn.getOptimizedStartCodesIndices=Bn.failedOptimizationPrefixMsg=void 0;var WH=UI(),bs=Yt(),zH=HI(),Ma=Pv(),_H="Complement Sets are not supported for first char optimization";Bn.failedOptimizationPrefixMsg=`Unable to use "first char" lexer optimizations:
+`;function tIe(t,e){e===void 0&&(e=!1);try{var r=(0,zH.getRegExpAst)(t),i=jI(r.value,{},r.flags.ignoreCase);return i}catch(s){if(s.message===_H)e&&(0,bs.PRINT_WARNING)(""+Bn.failedOptimizationPrefixMsg+(" Unable to optimize: < "+t.toString()+` >
+`)+` Complement Sets cannot be automatically optimized.
+ This will disable the lexer's first char optimizations.
+ See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#COMPLEMENT for details.`);else{var n="";e&&(n=`
+ This will disable the lexer's first char optimizations.
+ See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#REGEXP_PARSING for details.`),(0,bs.PRINT_ERROR)(Bn.failedOptimizationPrefixMsg+`
+`+(" Failed parsing: < "+t.toString()+` >
+`)+(" Using the regexp-to-ast library version: "+WH.VERSION+`
+`)+" Please open an issue at: https://github.com/bd82/regexp-to-ast/issues"+n)}}return[]}Bn.getOptimizedStartCodesIndices=tIe;function jI(t,e,r){switch(t.type){case"Disjunction":for(var i=0;i=Ma.minOptimizationVal)for(var f=u.from>=Ma.minOptimizationVal?u.from:Ma.minOptimizationVal,h=u.to,p=(0,Ma.charCodeToOptimizedIndex)(f),m=(0,Ma.charCodeToOptimizedIndex)(h),y=p;y<=m;y++)e[y]=y}}});break;case"Group":jI(o.value,e,r);break;default:throw Error("Non Exhaustive Match")}var a=o.quantifier!==void 0&&o.quantifier.atLeast===0;if(o.type==="Group"&&Dv(o)===!1||o.type!=="Group"&&a===!1)break}break;default:throw Error("non exhaustive match!")}return(0,bs.values)(e)}Bn.firstCharOptimizedIndices=jI;function GI(t,e,r){var i=(0,Ma.charCodeToOptimizedIndex)(t);e[i]=i,r===!0&&rIe(t,e)}function rIe(t,e){var r=String.fromCharCode(t),i=r.toUpperCase();if(i!==r){var n=(0,Ma.charCodeToOptimizedIndex)(i.charCodeAt(0));e[n]=n}else{var s=r.toLowerCase();if(s!==r){var n=(0,Ma.charCodeToOptimizedIndex)(s.charCodeAt(0));e[n]=n}}}function VH(t,e){return(0,bs.find)(t.value,function(r){if(typeof r=="number")return(0,bs.contains)(e,r);var i=r;return(0,bs.find)(e,function(n){return i.from<=n&&n<=i.to})!==void 0})}function Dv(t){return t.quantifier&&t.quantifier.atLeast===0?!0:t.value?(0,bs.isArray)(t.value)?(0,bs.every)(t.value,Dv):Dv(t.value):!1}var iIe=function(t){eIe(e,t);function e(r){var i=t.call(this)||this;return i.targetCharCodes=r,i.found=!1,i}return e.prototype.visitChildren=function(r){if(this.found!==!0){switch(r.type){case"Lookahead":this.visitLookahead(r);return;case"NegativeLookahead":this.visitNegativeLookahead(r);return}t.prototype.visitChildren.call(this,r)}},e.prototype.visitCharacter=function(r){(0,bs.contains)(this.targetCharCodes,r.value)&&(this.found=!0)},e.prototype.visitSet=function(r){r.complement?VH(r,this.targetCharCodes)===void 0&&(this.found=!0):VH(r,this.targetCharCodes)!==void 0&&(this.found=!0)},e}(WH.BaseRegExpVisitor);function nIe(t,e){if(e instanceof 
RegExp){var r=(0,zH.getRegExpAst)(e),i=new iIe(t);return i.visit(r),i.found}else return(0,bs.find)(e,function(n){return(0,bs.contains)(t,n.charCodeAt(0))})!==void 0}Bn.canMatchCharCode=nIe});var Pv=w(Ze=>{"use strict";var ZH=Ze&&Ze.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Ze,"__esModule",{value:!0});Ze.charCodeToOptimizedIndex=Ze.minOptimizationVal=Ze.buildLineBreakIssueMessage=Ze.LineTerminatorOptimizedTester=Ze.isShortPattern=Ze.isCustomPattern=Ze.cloneEmptyGroups=Ze.performWarningRuntimeChecks=Ze.performRuntimeChecks=Ze.addStickyFlag=Ze.addStartOfInput=Ze.findUnreachablePatterns=Ze.findModesThatDoNotExist=Ze.findInvalidGroupType=Ze.findDuplicatePatterns=Ze.findUnsupportedFlags=Ze.findStartOfInputAnchor=Ze.findEmptyMatchRegExps=Ze.findEndOfInputAnchor=Ze.findInvalidPatterns=Ze.findMissingPatterns=Ze.validatePatterns=Ze.analyzeTokenTypes=Ze.enableSticky=Ze.disableSticky=Ze.SUPPORT_STICKY=Ze.MODES=Ze.DEFAULT_MODE=void 0;var $H=UI(),Ar=Dp(),Ne=Yt(),gg=XH(),ej=HI(),Lo="PATTERN";Ze.DEFAULT_MODE="defaultMode";Ze.MODES="modes";Ze.SUPPORT_STICKY=typeof new RegExp("(?:)").sticky=="boolean";function sIe(){Ze.SUPPORT_STICKY=!1}Ze.disableSticky=sIe;function oIe(){Ze.SUPPORT_STICKY=!0}Ze.enableSticky=oIe;function AIe(t,e){e=(0,Ne.defaults)(e,{useSticky:Ze.SUPPORT_STICKY,debug:!1,safeMode:!1,positionTracking:"full",lineTerminatorCharacters:["\r",`
+`],tracer:function(S,x){return x()}});var r=e.tracer;r("initCharCodeToOptimizedIndexMap",function(){aIe()});var i;r("Reject Lexer.NA",function(){i=(0,Ne.reject)(t,function(S){return S[Lo]===Ar.Lexer.NA})});var n=!1,s;r("Transform Patterns",function(){n=!1,s=(0,Ne.map)(i,function(S){var x=S[Lo];if((0,Ne.isRegExp)(x)){var M=x.source;return M.length===1&&M!=="^"&&M!=="$"&&M!=="."&&!x.ignoreCase?M:M.length===2&&M[0]==="\\"&&!(0,Ne.contains)(["d","D","s","S","t","r","n","t","0","c","b","B","f","v","w","W"],M[1])?M[1]:e.useSticky?Fv(x):Rv(x)}else{if((0,Ne.isFunction)(x))return n=!0,{exec:x};if((0,Ne.has)(x,"exec"))return n=!0,x;if(typeof x=="string"){if(x.length===1)return x;var Y=x.replace(/[\\^$.*+?()[\]{}|]/g,"\\$&"),U=new RegExp(Y);return e.useSticky?Fv(U):Rv(U)}else throw Error("non exhaustive match")}})});var o,a,l,c,u;r("misc mapping",function(){o=(0,Ne.map)(i,function(S){return S.tokenTypeIdx}),a=(0,Ne.map)(i,function(S){var x=S.GROUP;if(x!==Ar.Lexer.SKIPPED){if((0,Ne.isString)(x))return x;if((0,Ne.isUndefined)(x))return!1;throw Error("non exhaustive match")}}),l=(0,Ne.map)(i,function(S){var x=S.LONGER_ALT;if(x){var M=(0,Ne.isArray)(x)?(0,Ne.map)(x,function(Y){return(0,Ne.indexOf)(i,Y)}):[(0,Ne.indexOf)(i,x)];return M}}),c=(0,Ne.map)(i,function(S){return S.PUSH_MODE}),u=(0,Ne.map)(i,function(S){return(0,Ne.has)(S,"POP_MODE")})});var g;r("Line Terminator Handling",function(){var S=ij(e.lineTerminatorCharacters);g=(0,Ne.map)(i,function(x){return!1}),e.positionTracking!=="onlyOffset"&&(g=(0,Ne.map)(i,function(x){if((0,Ne.has)(x,"LINE_BREAKS"))return x.LINE_BREAKS;if(rj(x,S)===!1)return(0,gg.canMatchCharCode)(S,x.PATTERN)}))});var f,h,p,m;r("Misc Mapping #2",function(){f=(0,Ne.map)(i,Nv),h=(0,Ne.map)(s,tj),p=(0,Ne.reduce)(i,function(S,x){var 
M=x.GROUP;return(0,Ne.isString)(M)&&M!==Ar.Lexer.SKIPPED&&(S[M]=[]),S},{}),m=(0,Ne.map)(s,function(S,x){return{pattern:s[x],longerAlt:l[x],canLineTerminator:g[x],isCustom:f[x],short:h[x],group:a[x],push:c[x],pop:u[x],tokenTypeIdx:o[x],tokenType:i[x]}})});var y=!0,Q=[];return e.safeMode||r("First Char Optimization",function(){Q=(0,Ne.reduce)(i,function(S,x,M){if(typeof x.PATTERN=="string"){var Y=x.PATTERN.charCodeAt(0),U=Tv(Y);Lv(S,U,m[M])}else if((0,Ne.isArray)(x.START_CHARS_HINT)){var J;(0,Ne.forEach)(x.START_CHARS_HINT,function(ee){var Z=typeof ee=="string"?ee.charCodeAt(0):ee,A=Tv(Z);J!==A&&(J=A,Lv(S,A,m[M]))})}else if((0,Ne.isRegExp)(x.PATTERN))if(x.PATTERN.unicode)y=!1,e.ensureOptimizations&&(0,Ne.PRINT_ERROR)(""+gg.failedOptimizationPrefixMsg+(" Unable to analyze < "+x.PATTERN.toString()+` > pattern.
+`)+` The regexp unicode flag is not currently supported by the regexp-to-ast library.
+ This will disable the lexer's first char optimizations.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNICODE_OPTIMIZE`);else{var W=(0,gg.getOptimizedStartCodesIndices)(x.PATTERN,e.ensureOptimizations);(0,Ne.isEmpty)(W)&&(y=!1),(0,Ne.forEach)(W,function(ee){Lv(S,ee,m[M])})}else e.ensureOptimizations&&(0,Ne.PRINT_ERROR)(""+gg.failedOptimizationPrefixMsg+(" TokenType: <"+x.name+`> is using a custom token pattern without providing parameter.
+`)+` This will disable the lexer's first char optimizations.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_OPTIMIZE`),y=!1;return S},[])}),r("ArrayPacking",function(){Q=(0,Ne.packArray)(Q)}),{emptyGroups:p,patternIdxToConfig:m,charCodeToPatternIdxToConfig:Q,hasCustom:n,canBeOptimized:y}}Ze.analyzeTokenTypes=AIe;function cIe(t,e){var r=[],i=nj(t);r=r.concat(i.errors);var n=sj(i.valid),s=n.valid;return r=r.concat(n.errors),r=r.concat(lIe(s)),r=r.concat(oj(s)),r=r.concat(aj(s,e)),r=r.concat(Aj(s)),r}Ze.validatePatterns=cIe;function lIe(t){var e=[],r=(0,Ne.filter)(t,function(i){return(0,Ne.isRegExp)(i[Lo])});return e=e.concat(lj(r)),e=e.concat(uj(r)),e=e.concat(gj(r)),e=e.concat(fj(r)),e=e.concat(cj(r)),e}function nj(t){var e=(0,Ne.filter)(t,function(n){return!(0,Ne.has)(n,Lo)}),r=(0,Ne.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- missing static 'PATTERN' property",type:Ar.LexerDefinitionErrorType.MISSING_PATTERN,tokenTypes:[n]}}),i=(0,Ne.difference)(t,e);return{errors:r,valid:i}}Ze.findMissingPatterns=nj;function sj(t){var e=(0,Ne.filter)(t,function(n){var s=n[Lo];return!(0,Ne.isRegExp)(s)&&!(0,Ne.isFunction)(s)&&!(0,Ne.has)(s,"exec")&&!(0,Ne.isString)(s)}),r=(0,Ne.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- static 'PATTERN' can only be a RegExp, a Function matching the {CustomPatternMatcherFunc} type or an Object matching the {ICustomPattern} interface.",type:Ar.LexerDefinitionErrorType.INVALID_PATTERN,tokenTypes:[n]}}),i=(0,Ne.difference)(t,e);return{errors:r,valid:i}}Ze.findInvalidPatterns=sj;var uIe=/[^\\][\$]/;function lj(t){var e=function(n){ZH(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitEndAnchor=function(o){this.found=!0},s}($H.BaseRegExpVisitor),r=(0,Ne.filter)(t,function(n){var s=n[Lo];try{var o=(0,ej.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch(l){return uIe.test(s.source)}}),i=(0,Ne.map)(r,function(n){return{message:`Unexpected RegExp Anchor Error:
+ Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain end of input anchor '$'
+ See chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:Ar.LexerDefinitionErrorType.EOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ze.findEndOfInputAnchor=lj;function cj(t){var e=(0,Ne.filter)(t,function(i){var n=i[Lo];return n.test("")}),r=(0,Ne.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' must not match an empty string",type:Ar.LexerDefinitionErrorType.EMPTY_MATCH_PATTERN,tokenTypes:[i]}});return r}Ze.findEmptyMatchRegExps=cj;var gIe=/[^\\[][\^]|^\^/;function uj(t){var e=function(n){ZH(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitStartAnchor=function(o){this.found=!0},s}($H.BaseRegExpVisitor),r=(0,Ne.filter)(t,function(n){var s=n[Lo];try{var o=(0,ej.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch(l){return gIe.test(s.source)}}),i=(0,Ne.map)(r,function(n){return{message:`Unexpected RegExp Anchor Error:
+ Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain start of input anchor '^'
+ See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:Ar.LexerDefinitionErrorType.SOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ze.findStartOfInputAnchor=uj;function gj(t){var e=(0,Ne.filter)(t,function(i){var n=i[Lo];return n instanceof RegExp&&(n.multiline||n.global)}),r=(0,Ne.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' may NOT contain global('g') or multiline('m')",type:Ar.LexerDefinitionErrorType.UNSUPPORTED_FLAGS_FOUND,tokenTypes:[i]}});return r}Ze.findUnsupportedFlags=gj;function fj(t){var e=[],r=(0,Ne.map)(t,function(s){return(0,Ne.reduce)(t,function(o,a){return s.PATTERN.source===a.PATTERN.source&&!(0,Ne.contains)(e,a)&&a.PATTERN!==Ar.Lexer.NA&&(e.push(a),o.push(a)),o},[])});r=(0,Ne.compact)(r);var i=(0,Ne.filter)(r,function(s){return s.length>1}),n=(0,Ne.map)(i,function(s){var o=(0,Ne.map)(s,function(l){return l.name}),a=(0,Ne.first)(s).PATTERN;return{message:"The same RegExp pattern ->"+a+"<-"+("has been used in all of the following Token Types: "+o.join(", ")+" <-"),type:Ar.LexerDefinitionErrorType.DUPLICATE_PATTERNS_FOUND,tokenTypes:s}});return n}Ze.findDuplicatePatterns=fj;function oj(t){var e=(0,Ne.filter)(t,function(i){if(!(0,Ne.has)(i,"GROUP"))return!1;var n=i.GROUP;return n!==Ar.Lexer.SKIPPED&&n!==Ar.Lexer.NA&&!(0,Ne.isString)(n)}),r=(0,Ne.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'GROUP' can only be Lexer.SKIPPED/Lexer.NA/A String",type:Ar.LexerDefinitionErrorType.INVALID_GROUP_TYPE_FOUND,tokenTypes:[i]}});return r}Ze.findInvalidGroupType=oj;function aj(t,e){var r=(0,Ne.filter)(t,function(n){return n.PUSH_MODE!==void 0&&!(0,Ne.contains)(e,n.PUSH_MODE)}),i=(0,Ne.map)(r,function(n){var s="Token Type: ->"+n.name+"<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->"+n.PUSH_MODE+"<-which does not exist";return{message:s,type:Ar.LexerDefinitionErrorType.PUSH_MODE_DOES_NOT_EXIST,tokenTypes:[n]}});return i}Ze.findModesThatDoNotExist=aj;function 
Aj(t){var e=[],r=(0,Ne.reduce)(t,function(i,n,s){var o=n.PATTERN;return o===Ar.Lexer.NA||((0,Ne.isString)(o)?i.push({str:o,idx:s,tokenType:n}):(0,Ne.isRegExp)(o)&&hIe(o)&&i.push({str:o.source,idx:s,tokenType:n})),i},[]);return(0,Ne.forEach)(t,function(i,n){(0,Ne.forEach)(r,function(s){var o=s.str,a=s.idx,l=s.tokenType;if(n"+i.name+"<-")+`in the lexer's definition.
+See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNREACHABLE`;e.push({message:c,type:Ar.LexerDefinitionErrorType.UNREACHABLE_PATTERN,tokenTypes:[i,l]})}})}),e}Ze.findUnreachablePatterns=Aj;function fIe(t,e){if((0,Ne.isRegExp)(e)){var r=e.exec(t);return r!==null&&r.index===0}else{if((0,Ne.isFunction)(e))return e(t,0,[],{});if((0,Ne.has)(e,"exec"))return e.exec(t,0,[],{});if(typeof e=="string")return e===t;throw Error("non exhaustive match")}}function hIe(t){var e=[".","\\","[","]","|","^","$","(",")","?","*","+","{"];return(0,Ne.find)(e,function(r){return t.source.indexOf(r)!==-1})===void 0}function Rv(t){var e=t.ignoreCase?"i":"";return new RegExp("^(?:"+t.source+")",e)}Ze.addStartOfInput=Rv;function Fv(t){var e=t.ignoreCase?"iy":"y";return new RegExp(""+t.source,e)}Ze.addStickyFlag=Fv;function pIe(t,e,r){var i=[];return(0,Ne.has)(t,Ze.DEFAULT_MODE)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ze.DEFAULT_MODE+`> property in its definition
+`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE}),(0,Ne.has)(t,Ze.MODES)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ze.MODES+`> property in its definition
+`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY}),(0,Ne.has)(t,Ze.MODES)&&(0,Ne.has)(t,Ze.DEFAULT_MODE)&&!(0,Ne.has)(t.modes,t.defaultMode)&&i.push({message:"A MultiMode Lexer cannot be initialized with a "+Ze.DEFAULT_MODE+": <"+t.defaultMode+`>which does not exist
+`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST}),(0,Ne.has)(t,Ze.MODES)&&(0,Ne.forEach)(t.modes,function(n,s){(0,Ne.forEach)(n,function(o,a){(0,Ne.isUndefined)(o)&&i.push({message:"A Lexer cannot be initialized using an undefined Token Type. Mode:"+("<"+s+"> at index: <"+a+`>
+`),type:Ar.LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED})})}),i}Ze.performRuntimeChecks=pIe;function dIe(t,e,r){var i=[],n=!1,s=(0,Ne.compact)((0,Ne.flatten)((0,Ne.mapValues)(t.modes,function(l){return l}))),o=(0,Ne.reject)(s,function(l){return l[Lo]===Ar.Lexer.NA}),a=ij(r);return e&&(0,Ne.forEach)(o,function(l){var c=rj(l,a);if(c!==!1){var u=hj(l,c),g={message:u,type:c.issue,tokenType:l};i.push(g)}else(0,Ne.has)(l,"LINE_BREAKS")?l.LINE_BREAKS===!0&&(n=!0):(0,gg.canMatchCharCode)(a,l.PATTERN)&&(n=!0)}),e&&!n&&i.push({message:`Warning: No LINE_BREAKS Found.
+ This Lexer has been defined to track line and column information,
+ But none of the Token Types can be identified as matching a line terminator.
+ See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#LINE_BREAKS
+ for details.`,type:Ar.LexerDefinitionErrorType.NO_LINE_BREAKS_FLAGS}),i}Ze.performWarningRuntimeChecks=dIe;function CIe(t){var e={},r=(0,Ne.keys)(t);return(0,Ne.forEach)(r,function(i){var n=t[i];if((0,Ne.isArray)(n))e[i]=[];else throw Error("non exhaustive match")}),e}Ze.cloneEmptyGroups=CIe;function Nv(t){var e=t.PATTERN;if((0,Ne.isRegExp)(e))return!1;if((0,Ne.isFunction)(e))return!0;if((0,Ne.has)(e,"exec"))return!0;if((0,Ne.isString)(e))return!1;throw Error("non exhaustive match")}Ze.isCustomPattern=Nv;function tj(t){return(0,Ne.isString)(t)&&t.length===1?t.charCodeAt(0):!1}Ze.isShortPattern=tj;Ze.LineTerminatorOptimizedTester={test:function(t){for(var e=t.length,r=this.lastIndex;r Token Type
+`)+(" Root cause: "+e.errMsg+`.
+`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#IDENTIFY_TERMINATOR";if(e.issue===Ar.LexerDefinitionErrorType.CUSTOM_LINE_BREAK)return`Warning: A Custom Token Pattern should specify the option.
+`+(" The problem is in the <"+t.name+`> Token Type
+`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_LINE_BREAK";throw Error("non exhaustive match")}Ze.buildLineBreakIssueMessage=hj;function ij(t){var e=(0,Ne.map)(t,function(r){return(0,Ne.isString)(r)&&r.length>0?r.charCodeAt(0):r});return e}function Lv(t,e,r){t[e]===void 0?t[e]=[r]:t[e].push(r)}Ze.minOptimizationVal=256;var YI=[];function Tv(t){return t255?255+~~(t/255):t}}});var fg=w(Ft=>{"use strict";Object.defineProperty(Ft,"__esModule",{value:!0});Ft.isTokenType=Ft.hasExtendingTokensTypesMapProperty=Ft.hasExtendingTokensTypesProperty=Ft.hasCategoriesProperty=Ft.hasShortKeyProperty=Ft.singleAssignCategoriesToksMap=Ft.assignCategoriesMapProp=Ft.assignCategoriesTokensProp=Ft.assignTokenDefaultProps=Ft.expandCategories=Ft.augmentTokenTypes=Ft.tokenIdxToClass=Ft.tokenShortNameIdx=Ft.tokenStructuredMatcherNoCategories=Ft.tokenStructuredMatcher=void 0;var ri=Yt();function mIe(t,e){var r=t.tokenTypeIdx;return r===e.tokenTypeIdx?!0:e.isParent===!0&&e.categoryMatchesMap[r]===!0}Ft.tokenStructuredMatcher=mIe;function EIe(t,e){return t.tokenTypeIdx===e.tokenTypeIdx}Ft.tokenStructuredMatcherNoCategories=EIe;Ft.tokenShortNameIdx=1;Ft.tokenIdxToClass={};function IIe(t){var e=pj(t);dj(e),mj(e),Cj(e),(0,ri.forEach)(e,function(r){r.isParent=r.categoryMatches.length>0})}Ft.augmentTokenTypes=IIe;function pj(t){for(var e=(0,ri.cloneArr)(t),r=t,i=!0;i;){r=(0,ri.compact)((0,ri.flatten)((0,ri.map)(r,function(s){return s.CATEGORIES})));var n=(0,ri.difference)(r,e);e=e.concat(n),(0,ri.isEmpty)(n)?i=!1:r=n}return e}Ft.expandCategories=pj;function dj(t){(0,ri.forEach)(t,function(e){Ej(e)||(Ft.tokenIdxToClass[Ft.tokenShortNameIdx]=e,e.tokenTypeIdx=Ft.tokenShortNameIdx++),Ov(e)&&!(0,ri.isArray)(e.CATEGORIES)&&(e.CATEGORIES=[e.CATEGORIES]),Ov(e)||(e.CATEGORIES=[]),Ij(e)||(e.categoryMatches=[]),yj(e)||(e.categoryMatchesMap={})})}Ft.assignTokenDefaultProps=dj;function 
Cj(t){(0,ri.forEach)(t,function(e){e.categoryMatches=[],(0,ri.forEach)(e.categoryMatchesMap,function(r,i){e.categoryMatches.push(Ft.tokenIdxToClass[i].tokenTypeIdx)})})}Ft.assignCategoriesTokensProp=Cj;function mj(t){(0,ri.forEach)(t,function(e){Mv([],e)})}Ft.assignCategoriesMapProp=mj;function Mv(t,e){(0,ri.forEach)(t,function(r){e.categoryMatchesMap[r.tokenTypeIdx]=!0}),(0,ri.forEach)(e.CATEGORIES,function(r){var i=t.concat(e);(0,ri.contains)(i,r)||Mv(i,r)})}Ft.singleAssignCategoriesToksMap=Mv;function Ej(t){return(0,ri.has)(t,"tokenTypeIdx")}Ft.hasShortKeyProperty=Ej;function Ov(t){return(0,ri.has)(t,"CATEGORIES")}Ft.hasCategoriesProperty=Ov;function Ij(t){return(0,ri.has)(t,"categoryMatches")}Ft.hasExtendingTokensTypesProperty=Ij;function yj(t){return(0,ri.has)(t,"categoryMatchesMap")}Ft.hasExtendingTokensTypesMapProperty=yj;function yIe(t){return(0,ri.has)(t,"tokenTypeIdx")}Ft.isTokenType=yIe});var Uv=w(qI=>{"use strict";Object.defineProperty(qI,"__esModule",{value:!0});qI.defaultLexerErrorProvider=void 0;qI.defaultLexerErrorProvider={buildUnableToPopLexerModeMessage:function(t){return"Unable to pop Lexer Mode after encountering Token ->"+t.image+"<- The Mode Stack is empty"},buildUnexpectedCharactersMessage:function(t,e,r,i,n){return"unexpected character: ->"+t.charAt(e)+"<- at offset: "+e+","+(" skipped "+r+" characters.")}}});var Dp=w(Bc=>{"use strict";Object.defineProperty(Bc,"__esModule",{value:!0});Bc.Lexer=Bc.LexerDefinitionErrorType=void 0;var 
so=Pv(),lr=Yt(),wIe=fg(),BIe=Uv(),bIe=HI(),QIe;(function(t){t[t.MISSING_PATTERN=0]="MISSING_PATTERN",t[t.INVALID_PATTERN=1]="INVALID_PATTERN",t[t.EOI_ANCHOR_FOUND=2]="EOI_ANCHOR_FOUND",t[t.UNSUPPORTED_FLAGS_FOUND=3]="UNSUPPORTED_FLAGS_FOUND",t[t.DUPLICATE_PATTERNS_FOUND=4]="DUPLICATE_PATTERNS_FOUND",t[t.INVALID_GROUP_TYPE_FOUND=5]="INVALID_GROUP_TYPE_FOUND",t[t.PUSH_MODE_DOES_NOT_EXIST=6]="PUSH_MODE_DOES_NOT_EXIST",t[t.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE=7]="MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE",t[t.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY=8]="MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY",t[t.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST=9]="MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST",t[t.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED=10]="LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED",t[t.SOI_ANCHOR_FOUND=11]="SOI_ANCHOR_FOUND",t[t.EMPTY_MATCH_PATTERN=12]="EMPTY_MATCH_PATTERN",t[t.NO_LINE_BREAKS_FLAGS=13]="NO_LINE_BREAKS_FLAGS",t[t.UNREACHABLE_PATTERN=14]="UNREACHABLE_PATTERN",t[t.IDENTIFY_TERMINATOR=15]="IDENTIFY_TERMINATOR",t[t.CUSTOM_LINE_BREAK=16]="CUSTOM_LINE_BREAK"})(QIe=Bc.LexerDefinitionErrorType||(Bc.LexerDefinitionErrorType={}));var Rp={deferDefinitionErrorsHandling:!1,positionTracking:"full",lineTerminatorsPattern:/\n|\r\n?/g,lineTerminatorCharacters:[`
+`,"\r"],ensureOptimizations:!1,safeMode:!1,errorMessageProvider:BIe.defaultLexerErrorProvider,traceInitPerf:!1,skipValidations:!1};Object.freeze(Rp);var vIe=function(){function t(e,r){var i=this;if(r===void 0&&(r=Rp),this.lexerDefinition=e,this.lexerDefinitionErrors=[],this.lexerDefinitionWarning=[],this.patternIdxToConfig={},this.charCodeToPatternIdxToConfig={},this.modes=[],this.emptyGroups={},this.config=void 0,this.trackStartLines=!0,this.trackEndLines=!0,this.hasCustom=!1,this.canModeBeOptimized={},typeof r=="boolean")throw Error(`The second argument to the Lexer constructor is now an ILexerConfig Object.
+a boolean 2nd argument is no longer supported`);this.config=(0,lr.merge)(Rp,r);var n=this.config.traceInitPerf;n===!0?(this.traceInitMaxIdent=Infinity,this.traceInitPerf=!0):typeof n=="number"&&(this.traceInitMaxIdent=n,this.traceInitPerf=!0),this.traceInitIndent=-1,this.TRACE_INIT("Lexer Constructor",function(){var s,o=!0;i.TRACE_INIT("Lexer Config handling",function(){if(i.config.lineTerminatorsPattern===Rp.lineTerminatorsPattern)i.config.lineTerminatorsPattern=so.LineTerminatorOptimizedTester;else if(i.config.lineTerminatorCharacters===Rp.lineTerminatorCharacters)throw Error(`Error: Missing property on the Lexer config.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#MISSING_LINE_TERM_CHARS`);if(r.safeMode&&r.ensureOptimizations)throw Error('"safeMode" and "ensureOptimizations" flags are mutually exclusive.');i.trackStartLines=/full|onlyStart/i.test(i.config.positionTracking),i.trackEndLines=/full/i.test(i.config.positionTracking),(0,lr.isArray)(e)?(s={modes:{}},s.modes[so.DEFAULT_MODE]=(0,lr.cloneArr)(e),s[so.DEFAULT_MODE]=so.DEFAULT_MODE):(o=!1,s=(0,lr.cloneObj)(e))}),i.config.skipValidations===!1&&(i.TRACE_INIT("performRuntimeChecks",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,so.performRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))}),i.TRACE_INIT("performWarningRuntimeChecks",function(){i.lexerDefinitionWarning=i.lexerDefinitionWarning.concat((0,so.performWarningRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))})),s.modes=s.modes?s.modes:{},(0,lr.forEach)(s.modes,function(u,g){s.modes[g]=(0,lr.reject)(u,function(f){return(0,lr.isUndefined)(f)})});var a=(0,lr.keys)(s.modes);if((0,lr.forEach)(s.modes,function(u,g){i.TRACE_INIT("Mode: <"+g+"> processing",function(){if(i.modes.push(g),i.config.skipValidations===!1&&i.TRACE_INIT("validatePatterns",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,so.validatePatterns)(u,a))}),(0,lr.isEmpty)(i.lexerDefinitionErrors)){(0,wIe.augmentTokenTypes)(u);var 
f;i.TRACE_INIT("analyzeTokenTypes",function(){f=(0,so.analyzeTokenTypes)(u,{lineTerminatorCharacters:i.config.lineTerminatorCharacters,positionTracking:r.positionTracking,ensureOptimizations:r.ensureOptimizations,safeMode:r.safeMode,tracer:i.TRACE_INIT.bind(i)})}),i.patternIdxToConfig[g]=f.patternIdxToConfig,i.charCodeToPatternIdxToConfig[g]=f.charCodeToPatternIdxToConfig,i.emptyGroups=(0,lr.merge)(i.emptyGroups,f.emptyGroups),i.hasCustom=f.hasCustom||i.hasCustom,i.canModeBeOptimized[g]=f.canBeOptimized}})}),i.defaultMode=s.defaultMode,!(0,lr.isEmpty)(i.lexerDefinitionErrors)&&!i.config.deferDefinitionErrorsHandling){var l=(0,lr.map)(i.lexerDefinitionErrors,function(u){return u.message}),c=l.join(`-----------------------
+`);throw new Error(`Errors detected in definition of Lexer:
+`+c)}(0,lr.forEach)(i.lexerDefinitionWarning,function(u){(0,lr.PRINT_WARNING)(u.message)}),i.TRACE_INIT("Choosing sub-methods implementations",function(){if(so.SUPPORT_STICKY?(i.chopInput=lr.IDENTITY,i.match=i.matchWithTest):(i.updateLastIndex=lr.NOOP,i.match=i.matchWithExec),o&&(i.handleModes=lr.NOOP),i.trackStartLines===!1&&(i.computeNewColumn=lr.IDENTITY),i.trackEndLines===!1&&(i.updateTokenEndLineColumnLocation=lr.NOOP),/full/i.test(i.config.positionTracking))i.createTokenInstance=i.createFullToken;else if(/onlyStart/i.test(i.config.positionTracking))i.createTokenInstance=i.createStartOnlyToken;else if(/onlyOffset/i.test(i.config.positionTracking))i.createTokenInstance=i.createOffsetOnlyToken;else throw Error('Invalid config option: "'+i.config.positionTracking+'"');i.hasCustom?(i.addToken=i.addTokenUsingPush,i.handlePayload=i.handlePayloadWithCustom):(i.addToken=i.addTokenUsingMemberAccess,i.handlePayload=i.handlePayloadNoCustom)}),i.TRACE_INIT("Failed Optimization Warnings",function(){var u=(0,lr.reduce)(i.canModeBeOptimized,function(g,f,h){return f===!1&&g.push(h),g},[]);if(r.ensureOptimizations&&!(0,lr.isEmpty)(u))throw Error("Lexer Modes: < "+u.join(", ")+` > cannot be optimized.
+ Disable the "ensureOptimizations" lexer config flag to silently ignore this and run the lexer in an un-optimized mode.
+ Or inspect the console log for details on how to resolve these issues.`)}),i.TRACE_INIT("clearRegExpParserCache",function(){(0,bIe.clearRegExpParserCache)()}),i.TRACE_INIT("toFastProperties",function(){(0,lr.toFastProperties)(i)})})}return t.prototype.tokenize=function(e,r){if(r===void 0&&(r=this.defaultMode),!(0,lr.isEmpty)(this.lexerDefinitionErrors)){var i=(0,lr.map)(this.lexerDefinitionErrors,function(o){return o.message}),n=i.join(`-----------------------
+`);throw new Error(`Unable to Tokenize because Errors detected in definition of Lexer:
+`+n)}var s=this.tokenizeInternal(e,r);return s},t.prototype.tokenizeInternal=function(e,r){var i=this,n,s,o,a,l,c,u,g,f,h,p,m,y,Q,S,x,M=e,Y=M.length,U=0,J=0,W=this.hasCustom?0:Math.floor(e.length/10),ee=new Array(W),Z=[],A=this.trackStartLines?1:void 0,ne=this.trackStartLines?1:void 0,le=(0,so.cloneEmptyGroups)(this.emptyGroups),Ae=this.trackStartLines,T=this.config.lineTerminatorsPattern,L=0,Ee=[],we=[],qe=[],re=[];Object.freeze(re);var se=void 0;function Qe(){return Ee}function he(vr){var Hn=(0,so.charCodeToOptimizedIndex)(vr),us=we[Hn];return us===void 0?re:us}var Fe=function(vr){if(qe.length===1&&vr.tokenType.PUSH_MODE===void 0){var Hn=i.config.errorMessageProvider.buildUnableToPopLexerModeMessage(vr);Z.push({offset:vr.startOffset,line:vr.startLine!==void 0?vr.startLine:void 0,column:vr.startColumn!==void 0?vr.startColumn:void 0,length:vr.image.length,message:Hn})}else{qe.pop();var us=(0,lr.last)(qe);Ee=i.patternIdxToConfig[us],we=i.charCodeToPatternIdxToConfig[us],L=Ee.length;var Ia=i.canModeBeOptimized[us]&&i.config.safeMode===!1;we&&Ia?se=he:se=Qe}};function Ue(vr){qe.push(vr),we=this.charCodeToPatternIdxToConfig[vr],Ee=this.patternIdxToConfig[vr],L=Ee.length,L=Ee.length;var Hn=this.canModeBeOptimized[vr]&&this.config.safeMode===!1;we&&Hn?se=he:se=Qe}Ue.call(this,r);for(var xe;Uc.length){c=a,u=g,xe=gt;break}}}break}}if(c!==null){if(f=c.length,h=xe.group,h!==void 0&&(p=xe.tokenTypeIdx,m=this.createTokenInstance(c,U,p,xe.tokenType,A,ne,f),this.handlePayload(m,u),h===!1?J=this.addToken(ee,J,m):le[h].push(m)),e=this.chopInput(e,f),U=U+f,ne=this.computeNewColumn(ne,f),Ae===!0&&xe.canLineTerminator===!0){var Mt=0,mi=void 0,jt=void 0;T.lastIndex=0;do mi=T.test(c),mi===!0&&(jt=T.lastIndex-1,Mt++);while(mi===!0);Mt!==0&&(A=A+Mt,ne=f-jt,this.updateTokenEndLineColumnLocation(m,h,jt,Mt,A,ne,f))}this.handleModes(xe,Fe,Ue,m)}else{for(var Qr=U,Ti=A,_s=ne,Un=!1;!Un&&U <"+e+">");var n=(0,lr.timer)(r),s=n.time,o=n.value,a=s>10?console.warn:console.log;return 
this.traceInitIndent time: "+s+"ms"),this.traceInitIndent--,o}else return r()},t.SKIPPED="This marks a skipped Token pattern, this means each token identified by it willbe consumed and then thrown into oblivion, this can be used to for example to completely ignore whitespace.",t.NA=/NOT_APPLICABLE/,t}();Bc.Lexer=vIe});var JA=w(xi=>{"use strict";Object.defineProperty(xi,"__esModule",{value:!0});xi.tokenMatcher=xi.createTokenInstance=xi.EOF=xi.createToken=xi.hasTokenLabel=xi.tokenName=xi.tokenLabel=void 0;var oo=Yt(),SIe=Dp(),Kv=fg();function kIe(t){return wj(t)?t.LABEL:t.name}xi.tokenLabel=kIe;function xIe(t){return t.name}xi.tokenName=xIe;function wj(t){return(0,oo.isString)(t.LABEL)&&t.LABEL!==""}xi.hasTokenLabel=wj;var PIe="parent",Bj="categories",bj="label",Qj="group",vj="push_mode",Sj="pop_mode",kj="longer_alt",xj="line_breaks",Pj="start_chars_hint";function Dj(t){return DIe(t)}xi.createToken=Dj;function DIe(t){var e=t.pattern,r={};if(r.name=t.name,(0,oo.isUndefined)(e)||(r.PATTERN=e),(0,oo.has)(t,PIe))throw`The parent property is no longer supported.
+See: https://github.com/chevrotain/chevrotain/issues/564#issuecomment-349062346 for details.`;return(0,oo.has)(t,Bj)&&(r.CATEGORIES=t[Bj]),(0,Kv.augmentTokenTypes)([r]),(0,oo.has)(t,bj)&&(r.LABEL=t[bj]),(0,oo.has)(t,Qj)&&(r.GROUP=t[Qj]),(0,oo.has)(t,Sj)&&(r.POP_MODE=t[Sj]),(0,oo.has)(t,vj)&&(r.PUSH_MODE=t[vj]),(0,oo.has)(t,kj)&&(r.LONGER_ALT=t[kj]),(0,oo.has)(t,xj)&&(r.LINE_BREAKS=t[xj]),(0,oo.has)(t,Pj)&&(r.START_CHARS_HINT=t[Pj]),r}xi.EOF=Dj({name:"EOF",pattern:SIe.Lexer.NA});(0,Kv.augmentTokenTypes)([xi.EOF]);function RIe(t,e,r,i,n,s,o,a){return{image:e,startOffset:r,endOffset:i,startLine:n,endLine:s,startColumn:o,endColumn:a,tokenTypeIdx:t.tokenTypeIdx,tokenType:t}}xi.createTokenInstance=RIe;function FIe(t,e){return(0,Kv.tokenStructuredMatcher)(t,e)}xi.tokenMatcher=FIe});var bn=w(Vt=>{"use strict";var Ua=Vt&&Vt.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Vt,"__esModule",{value:!0});Vt.serializeProduction=Vt.serializeGrammar=Vt.Terminal=Vt.Alternation=Vt.RepetitionWithSeparator=Vt.Repetition=Vt.RepetitionMandatoryWithSeparator=Vt.RepetitionMandatory=Vt.Option=Vt.Alternative=Vt.Rule=Vt.NonTerminal=Vt.AbstractProduction=void 0;var fr=Yt(),NIe=JA(),To=function(){function t(e){this._definition=e}return Object.defineProperty(t.prototype,"definition",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),t.prototype.accept=function(e){e.visit(this),(0,fr.forEach)(this.definition,function(r){r.accept(e)})},t}();Vt.AbstractProduction=To;var 
Rj=function(t){Ua(e,t);function e(r){var i=t.call(this,[])||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,"definition",{get:function(){return this.referencedRule!==void 0?this.referencedRule.definition:[]},set:function(r){},enumerable:!1,configurable:!0}),e.prototype.accept=function(r){r.visit(this)},e}(To);Vt.NonTerminal=Rj;var Fj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.orgText="",(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Rule=Fj;var Nj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.ignoreAmbiguities=!1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Alternative=Nj;var Lj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Option=Lj;var Tj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionMandatory=Tj;var Oj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionMandatoryWithSeparator=Oj;var Mj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Repetition=Mj;var Uj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionWithSeparator=Uj;var Kj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,i.ignoreAmbiguities=!1,i.hasPredicates=!1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return 
Object.defineProperty(e.prototype,"definition",{get:function(){return this._definition},set:function(r){this._definition=r},enumerable:!1,configurable:!0}),e}(To);Vt.Alternation=Kj;var JI=function(){function t(e){this.idx=1,(0,fr.assign)(this,(0,fr.pick)(e,function(r){return r!==void 0}))}return t.prototype.accept=function(e){e.visit(this)},t}();Vt.Terminal=JI;function LIe(t){return(0,fr.map)(t,Fp)}Vt.serializeGrammar=LIe;function Fp(t){function e(s){return(0,fr.map)(s,Fp)}if(t instanceof Rj){var r={type:"NonTerminal",name:t.nonTerminalName,idx:t.idx};return(0,fr.isString)(t.label)&&(r.label=t.label),r}else{if(t instanceof Nj)return{type:"Alternative",definition:e(t.definition)};if(t instanceof Lj)return{type:"Option",idx:t.idx,definition:e(t.definition)};if(t instanceof Tj)return{type:"RepetitionMandatory",idx:t.idx,definition:e(t.definition)};if(t instanceof Oj)return{type:"RepetitionMandatoryWithSeparator",idx:t.idx,separator:Fp(new JI({terminalType:t.separator})),definition:e(t.definition)};if(t instanceof Uj)return{type:"RepetitionWithSeparator",idx:t.idx,separator:Fp(new JI({terminalType:t.separator})),definition:e(t.definition)};if(t instanceof Mj)return{type:"Repetition",idx:t.idx,definition:e(t.definition)};if(t instanceof Kj)return{type:"Alternation",idx:t.idx,definition:e(t.definition)};if(t instanceof JI){var i={type:"Terminal",name:t.terminalType.name,label:(0,NIe.tokenLabel)(t.terminalType),idx:t.idx};(0,fr.isString)(t.label)&&(i.terminalLabel=t.label);var n=t.terminalType.PATTERN;return t.terminalType.PATTERN&&(i.pattern=(0,fr.isRegExp)(n)?n.source:n),i}else{if(t instanceof Fj)return{type:"Rule",name:t.name,orgText:t.orgText,definition:e(t.definition)};throw Error("non exhaustive match")}}}Vt.serializeProduction=Fp});var zI=w(WI=>{"use strict";Object.defineProperty(WI,"__esModule",{value:!0});WI.RestWalker=void 0;var Hv=Yt(),Qn=bn(),TIe=function(){function t(){}return t.prototype.walk=function(e,r){var i=this;r===void 
0&&(r=[]),(0,Hv.forEach)(e.definition,function(n,s){var o=(0,Hv.drop)(e.definition,s+1);if(n instanceof Qn.NonTerminal)i.walkProdRef(n,o,r);else if(n instanceof Qn.Terminal)i.walkTerminal(n,o,r);else if(n instanceof Qn.Alternative)i.walkFlat(n,o,r);else if(n instanceof Qn.Option)i.walkOption(n,o,r);else if(n instanceof Qn.RepetitionMandatory)i.walkAtLeastOne(n,o,r);else if(n instanceof Qn.RepetitionMandatoryWithSeparator)i.walkAtLeastOneSep(n,o,r);else if(n instanceof Qn.RepetitionWithSeparator)i.walkManySep(n,o,r);else if(n instanceof Qn.Repetition)i.walkMany(n,o,r);else if(n instanceof Qn.Alternation)i.walkOr(n,o,r);else throw Error("non exhaustive match")})},t.prototype.walkTerminal=function(e,r,i){},t.prototype.walkProdRef=function(e,r,i){},t.prototype.walkFlat=function(e,r,i){var n=r.concat(i);this.walk(e,n)},t.prototype.walkOption=function(e,r,i){var n=r.concat(i);this.walk(e,n)},t.prototype.walkAtLeastOne=function(e,r,i){var n=[new Qn.Option({definition:e.definition})].concat(r,i);this.walk(e,n)},t.prototype.walkAtLeastOneSep=function(e,r,i){var n=Hj(e,r,i);this.walk(e,n)},t.prototype.walkMany=function(e,r,i){var n=[new Qn.Option({definition:e.definition})].concat(r,i);this.walk(e,n)},t.prototype.walkManySep=function(e,r,i){var n=Hj(e,r,i);this.walk(e,n)},t.prototype.walkOr=function(e,r,i){var n=this,s=r.concat(i);(0,Hv.forEach)(e.definition,function(o){var a=new Qn.Alternative({definition:[o]});n.walk(a,s)})},t}();WI.RestWalker=TIe;function Hj(t,e,r){var i=[new Qn.Option({definition:[new Qn.Terminal({terminalType:t.separator})].concat(t.definition)})],n=i.concat(e,r);return n}});var hg=w(_I=>{"use strict";Object.defineProperty(_I,"__esModule",{value:!0});_I.GAstVisitor=void 0;var Oo=bn(),OIe=function(){function t(){}return t.prototype.visit=function(e){var r=e;switch(r.constructor){case Oo.NonTerminal:return this.visitNonTerminal(r);case Oo.Alternative:return this.visitAlternative(r);case Oo.Option:return this.visitOption(r);case 
Oo.RepetitionMandatory:return this.visitRepetitionMandatory(r);case Oo.RepetitionMandatoryWithSeparator:return this.visitRepetitionMandatoryWithSeparator(r);case Oo.RepetitionWithSeparator:return this.visitRepetitionWithSeparator(r);case Oo.Repetition:return this.visitRepetition(r);case Oo.Alternation:return this.visitAlternation(r);case Oo.Terminal:return this.visitTerminal(r);case Oo.Rule:return this.visitRule(r);default:throw Error("non exhaustive match")}},t.prototype.visitNonTerminal=function(e){},t.prototype.visitAlternative=function(e){},t.prototype.visitOption=function(e){},t.prototype.visitRepetition=function(e){},t.prototype.visitRepetitionMandatory=function(e){},t.prototype.visitRepetitionMandatoryWithSeparator=function(e){},t.prototype.visitRepetitionWithSeparator=function(e){},t.prototype.visitAlternation=function(e){},t.prototype.visitTerminal=function(e){},t.prototype.visitRule=function(e){},t}();_I.GAstVisitor=OIe});var Lp=w(Gi=>{"use strict";var MIe=Gi&&Gi.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Gi,"__esModule",{value:!0});Gi.collectMethods=Gi.DslMethodsCollectorVisitor=Gi.getProductionDslName=Gi.isBranchingProd=Gi.isOptionalProd=Gi.isSequenceProd=void 0;var Np=Yt(),kr=bn(),UIe=hg();function KIe(t){return t instanceof kr.Alternative||t instanceof kr.Option||t instanceof kr.Repetition||t instanceof kr.RepetitionMandatory||t instanceof kr.RepetitionMandatoryWithSeparator||t instanceof kr.RepetitionWithSeparator||t instanceof kr.Terminal||t instanceof kr.Rule}Gi.isSequenceProd=KIe;function 
jv(t,e){e===void 0&&(e=[]);var r=t instanceof kr.Option||t instanceof kr.Repetition||t instanceof kr.RepetitionWithSeparator;return r?!0:t instanceof kr.Alternation?(0,Np.some)(t.definition,function(i){return jv(i,e)}):t instanceof kr.NonTerminal&&(0,Np.contains)(e,t)?!1:t instanceof kr.AbstractProduction?(t instanceof kr.NonTerminal&&e.push(t),(0,Np.every)(t.definition,function(i){return jv(i,e)})):!1}Gi.isOptionalProd=jv;function HIe(t){return t instanceof kr.Alternation}Gi.isBranchingProd=HIe;function jIe(t){if(t instanceof kr.NonTerminal)return"SUBRULE";if(t instanceof kr.Option)return"OPTION";if(t instanceof kr.Alternation)return"OR";if(t instanceof kr.RepetitionMandatory)return"AT_LEAST_ONE";if(t instanceof kr.RepetitionMandatoryWithSeparator)return"AT_LEAST_ONE_SEP";if(t instanceof kr.RepetitionWithSeparator)return"MANY_SEP";if(t instanceof kr.Repetition)return"MANY";if(t instanceof kr.Terminal)return"CONSUME";throw Error("non exhaustive match")}Gi.getProductionDslName=jIe;var jj=function(t){MIe(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return r.separator="-",r.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]},r}return e.prototype.reset=function(){this.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]}},e.prototype.visitTerminal=function(r){var i=r.terminalType.name+this.separator+"Terminal";(0,Np.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(r)},e.prototype.visitNonTerminal=function(r){var 
i=r.nonTerminalName+this.separator+"Terminal";(0,Np.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(r)},e.prototype.visitOption=function(r){this.dslMethods.option.push(r)},e.prototype.visitRepetitionWithSeparator=function(r){this.dslMethods.repetitionWithSeparator.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.dslMethods.repetitionMandatory.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.dslMethods.repetitionMandatoryWithSeparator.push(r)},e.prototype.visitRepetition=function(r){this.dslMethods.repetition.push(r)},e.prototype.visitAlternation=function(r){this.dslMethods.alternation.push(r)},e}(UIe.GAstVisitor);Gi.DslMethodsCollectorVisitor=jj;var VI=new jj;function GIe(t){VI.reset(),t.accept(VI);var e=VI.dslMethods;return VI.reset(),e}Gi.collectMethods=GIe});var Yv=w(Mo=>{"use strict";Object.defineProperty(Mo,"__esModule",{value:!0});Mo.firstForTerminal=Mo.firstForBranching=Mo.firstForSequence=Mo.first=void 0;var XI=Yt(),Gj=bn(),Gv=Lp();function ZI(t){if(t instanceof Gj.NonTerminal)return ZI(t.referencedRule);if(t instanceof Gj.Terminal)return Jj(t);if((0,Gv.isSequenceProd)(t))return Yj(t);if((0,Gv.isBranchingProd)(t))return qj(t);throw Error("non exhaustive match")}Mo.first=ZI;function Yj(t){for(var e=[],r=t.definition,i=0,n=r.length>i,s,o=!0;n&&o;)s=r[i],o=(0,Gv.isOptionalProd)(s),e=e.concat(ZI(s)),i=i+1,n=r.length>i;return(0,XI.uniq)(e)}Mo.firstForSequence=Yj;function qj(t){var e=(0,XI.map)(t.definition,function(r){return ZI(r)});return(0,XI.uniq)((0,XI.flatten)(e))}Mo.firstForBranching=qj;function Jj(t){return[t.terminalType]}Mo.firstForTerminal=Jj});var qv=w($I=>{"use strict";Object.defineProperty($I,"__esModule",{value:!0});$I.IN=void 0;$I.IN="_~IN~_"});var Xj=w(Qs=>{"use strict";var YIe=Qs&&Qs.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in 
n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Qs,"__esModule",{value:!0});Qs.buildInProdFollowPrefix=Qs.buildBetweenProdsFollowPrefix=Qs.computeAllProdsFollows=Qs.ResyncFollowsWalker=void 0;var qIe=zI(),JIe=Yv(),Wj=Yt(),zj=qv(),WIe=bn(),Vj=function(t){YIe(e,t);function e(r){var i=t.call(this)||this;return i.topProd=r,i.follows={},i}return e.prototype.startWalking=function(){return this.walk(this.topProd),this.follows},e.prototype.walkTerminal=function(r,i,n){},e.prototype.walkProdRef=function(r,i,n){var s=_j(r.referencedRule,r.idx)+this.topProd.name,o=i.concat(n),a=new WIe.Alternative({definition:o}),l=(0,JIe.first)(a);this.follows[s]=l},e}(qIe.RestWalker);Qs.ResyncFollowsWalker=Vj;function zIe(t){var e={};return(0,Wj.forEach)(t,function(r){var i=new Vj(r).startWalking();(0,Wj.assign)(e,i)}),e}Qs.computeAllProdsFollows=zIe;function _j(t,e){return t.name+e+zj.IN}Qs.buildBetweenProdsFollowPrefix=_j;function _Ie(t){var e=t.terminalType.name;return e+t.idx+zj.IN}Qs.buildInProdFollowPrefix=_Ie});var Tp=w(Ka=>{"use strict";Object.defineProperty(Ka,"__esModule",{value:!0});Ka.defaultGrammarValidatorErrorProvider=Ka.defaultGrammarResolverErrorProvider=Ka.defaultParserErrorProvider=void 0;var pg=JA(),VIe=Yt(),ao=Yt(),Jv=bn(),Zj=Lp();Ka.defaultParserErrorProvider={buildMismatchTokenMessage:function(t){var e=t.expected,r=t.actual,i=t.previous,n=t.ruleName,s=(0,pg.hasTokenLabel)(e),o=s?"--> "+(0,pg.tokenLabel)(e)+" <--":"token of type --> "+e.name+" <--",a="Expecting "+o+" but found --> '"+r.image+"' <--";return a},buildNotAllInputParsedMessage:function(t){var e=t.firstRedundant,r=t.ruleName;return"Redundant input, expecting EOF but found: 
"+e.image},buildNoViableAltMessage:function(t){var e=t.expectedPathsPerAlt,r=t.actual,i=t.previous,n=t.customUserDescription,s=t.ruleName,o="Expecting: ",a=(0,ao.first)(r).image,l=`
+but found: '`+a+"'";if(n)return o+n+l;var c=(0,ao.reduce)(e,function(h,p){return h.concat(p)},[]),u=(0,ao.map)(c,function(h){return"["+(0,ao.map)(h,function(p){return(0,pg.tokenLabel)(p)}).join(", ")+"]"}),g=(0,ao.map)(u,function(h,p){return" "+(p+1)+". "+h}),f=`one of these possible Token sequences:
+`+g.join(`
+`);return o+f+l},buildEarlyExitMessage:function(t){var e=t.expectedIterationPaths,r=t.actual,i=t.customUserDescription,n=t.ruleName,s="Expecting: ",o=(0,ao.first)(r).image,a=`
+but found: '`+o+"'";if(i)return s+i+a;var l=(0,ao.map)(e,function(u){return"["+(0,ao.map)(u,function(g){return(0,pg.tokenLabel)(g)}).join(",")+"]"}),c=`expecting at least one iteration which starts with one of these possible Token sequences::
+ `+("<"+l.join(" ,")+">");return s+c+a}};Object.freeze(Ka.defaultParserErrorProvider);Ka.defaultGrammarResolverErrorProvider={buildRuleNotFoundError:function(t,e){var r="Invalid grammar, reference to a rule which is not defined: ->"+e.nonTerminalName+`<-
+inside top level rule: ->`+t.name+"<-";return r}};Ka.defaultGrammarValidatorErrorProvider={buildDuplicateFoundError:function(t,e){function r(u){return u instanceof Jv.Terminal?u.terminalType.name:u instanceof Jv.NonTerminal?u.nonTerminalName:""}var i=t.name,n=(0,ao.first)(e),s=n.idx,o=(0,Zj.getProductionDslName)(n),a=r(n),l=s>0,c="->"+o+(l?s:"")+"<- "+(a?"with argument: ->"+a+"<-":"")+`
+ appears more than once (`+e.length+" times) in the top level rule: ->"+i+`<-.
+ For further details see: https://chevrotain.io/docs/FAQ.html#NUMERICAL_SUFFIXES
+ `;return c=c.replace(/[ \t]+/g," "),c=c.replace(/\s\s+/g,`
+`),c},buildNamespaceConflictError:function(t){var e=`Namespace conflict found in grammar.
+`+("The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <"+t.name+`>.
+`)+`To resolve this make sure each Terminal and Non-Terminal names are unique
+This is easy to accomplish by using the convention that Terminal names start with an uppercase letter
+and Non-Terminal names start with a lower case letter.`;return e},buildAlternationPrefixAmbiguityError:function(t){var e=(0,ao.map)(t.prefixPath,function(n){return(0,pg.tokenLabel)(n)}).join(", "),r=t.alternation.idx===0?"":t.alternation.idx,i="Ambiguous alternatives: <"+t.ambiguityIndices.join(" ,")+`> due to common lookahead prefix
+`+("in inside <"+t.topLevelRule.name+`> Rule,
+`)+("<"+e+`> may appears as a prefix path in all these alternatives.
+`)+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX
+For Further details.`;return i},buildAlternationAmbiguityError:function(t){var e=(0,ao.map)(t.prefixPath,function(n){return(0,pg.tokenLabel)(n)}).join(", "),r=t.alternation.idx===0?"":t.alternation.idx,i="Ambiguous Alternatives Detected: <"+t.ambiguityIndices.join(" ,")+"> in "+(" inside <"+t.topLevelRule.name+`> Rule,
+`)+("<"+e+`> may appears as a prefix path in all these alternatives.
+`);return i=i+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES
+For Further details.`,i},buildEmptyRepetitionError:function(t){var e=(0,Zj.getProductionDslName)(t.repetition);t.repetition.idx!==0&&(e+=t.repetition.idx);var r="The repetition <"+e+"> within Rule <"+t.topLevelRule.name+`> can never consume any tokens.
+This could lead to an infinite loop.`;return r},buildTokenNameError:function(t){return"deprecated"},buildEmptyAlternationError:function(t){var e="Ambiguous empty alternative: <"+(t.emptyChoiceIdx+1)+">"+(" in inside <"+t.topLevelRule.name+`> Rule.
+`)+"Only the last alternative may be an empty alternative.";return e},buildTooManyAlternativesError:function(t){var e=`An Alternation cannot have more than 256 alternatives:
+`+(" inside <"+t.topLevelRule.name+`> Rule.
+ has `+(t.alternation.definition.length+1)+" alternatives.");return e},buildLeftRecursionError:function(t){var e=t.topLevelRule.name,r=VIe.map(t.leftRecursionPath,function(s){return s.name}),i=e+" --> "+r.concat([e]).join(" --> "),n=`Left Recursion found in grammar.
+`+("rule: <"+e+`> can be invoked from itself (directly or indirectly)
+`)+(`without consuming any Tokens. The grammar path that causes this is:
+ `+i+`
+`)+` To fix this refactor your grammar to remove the left recursion.
+see: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`;return n},buildInvalidRuleNameError:function(t){return"deprecated"},buildDuplicateRuleNameError:function(t){var e;t.topLevelRule instanceof Jv.Rule?e=t.topLevelRule.name:e=t.topLevelRule;var r="Duplicate definition, rule: ->"+e+"<- is already defined in the grammar: ->"+t.grammarName+"<-";return r}}});var tG=w(WA=>{"use strict";var XIe=WA&&WA.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(WA,"__esModule",{value:!0});WA.GastRefResolverVisitor=WA.resolveGrammar=void 0;var ZIe=Xn(),$j=Yt(),$Ie=hg();function eye(t,e){var r=new eG(t,e);return r.resolveRefs(),r.errors}WA.resolveGrammar=eye;var eG=function(t){XIe(e,t);function e(r,i){var n=t.call(this)||this;return n.nameToTopRule=r,n.errMsgProvider=i,n.errors=[],n}return e.prototype.resolveRefs=function(){var r=this;(0,$j.forEach)((0,$j.values)(this.nameToTopRule),function(i){r.currTopLevel=i,i.accept(r)})},e.prototype.visitNonTerminal=function(r){var i=this.nameToTopRule[r.nonTerminalName];if(i)r.referencedRule=i;else{var n=this.errMsgProvider.buildRuleNotFoundError(this.currTopLevel,r);this.errors.push({message:n,type:ZIe.ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,ruleName:this.currTopLevel.name,unresolvedRefName:r.nonTerminalName})}},e}($Ie.GAstVisitor);WA.GastRefResolverVisitor=eG});var Mp=w(Mr=>{"use strict";var bc=Mr&&Mr.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in 
n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Mr,"__esModule",{value:!0});Mr.nextPossibleTokensAfter=Mr.possiblePathsFrom=Mr.NextTerminalAfterAtLeastOneSepWalker=Mr.NextTerminalAfterAtLeastOneWalker=Mr.NextTerminalAfterManySepWalker=Mr.NextTerminalAfterManyWalker=Mr.AbstractNextTerminalAfterProductionWalker=Mr.NextAfterTokenWalker=Mr.AbstractNextPossibleTokensWalker=void 0;var rG=zI(),Ut=Yt(),tye=Yv(),Dt=bn(),iG=function(t){bc(e,t);function e(r,i){var n=t.call(this)||this;return n.topProd=r,n.path=i,n.possibleTokTypes=[],n.nextProductionName="",n.nextProductionOccurrence=0,n.found=!1,n.isAtEndOfPath=!1,n}return e.prototype.startWalking=function(){if(this.found=!1,this.path.ruleStack[0]!==this.topProd.name)throw Error("The path does not start with the walker's top Rule!");return this.ruleStack=(0,Ut.cloneArr)(this.path.ruleStack).reverse(),this.occurrenceStack=(0,Ut.cloneArr)(this.path.occurrenceStack).reverse(),this.ruleStack.pop(),this.occurrenceStack.pop(),this.updateExpectedNext(),this.walk(this.topProd),this.possibleTokTypes},e.prototype.walk=function(r,i){i===void 0&&(i=[]),this.found||t.prototype.walk.call(this,r,i)},e.prototype.walkProdRef=function(r,i,n){if(r.referencedRule.name===this.nextProductionName&&r.idx===this.nextProductionOccurrence){var s=i.concat(n);this.updateExpectedNext(),this.walk(r.referencedRule,s)}},e.prototype.updateExpectedNext=function(){(0,Ut.isEmpty)(this.ruleStack)?(this.nextProductionName="",this.nextProductionOccurrence=0,this.isAtEndOfPath=!0):(this.nextProductionName=this.ruleStack.pop(),this.nextProductionOccurrence=this.occurrenceStack.pop())},e}(rG.RestWalker);Mr.AbstractNextPossibleTokensWalker=iG;var rye=function(t){bc(e,t);function 
e(r,i){var n=t.call(this,r,i)||this;return n.path=i,n.nextTerminalName="",n.nextTerminalOccurrence=0,n.nextTerminalName=n.path.lastTok.name,n.nextTerminalOccurrence=n.path.lastTokOccurrence,n}return e.prototype.walkTerminal=function(r,i,n){if(this.isAtEndOfPath&&r.terminalType.name===this.nextTerminalName&&r.idx===this.nextTerminalOccurrence&&!this.found){var s=i.concat(n),o=new Dt.Alternative({definition:s});this.possibleTokTypes=(0,tye.first)(o),this.found=!0}},e}(iG);Mr.NextAfterTokenWalker=rye;var Op=function(t){bc(e,t);function e(r,i){var n=t.call(this)||this;return n.topRule=r,n.occurrence=i,n.result={token:void 0,occurrence:void 0,isEndOfRule:void 0},n}return e.prototype.startWalking=function(){return this.walk(this.topRule),this.result},e}(rG.RestWalker);Mr.AbstractNextTerminalAfterProductionWalker=Op;var iye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkMany=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkMany.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterManyWalker=iye;var nye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkManySep=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkManySep.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterManySepWalker=nye;var sye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkAtLeastOne=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else 
t.prototype.walkAtLeastOne.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterAtLeastOneWalker=sye;var oye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkAtLeastOneSep=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkAtLeastOneSep.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterAtLeastOneSepWalker=oye;function nG(t,e,r){r===void 0&&(r=[]),r=(0,Ut.cloneArr)(r);var i=[],n=0;function s(c){return c.concat((0,Ut.drop)(t,n+1))}function o(c){var u=nG(s(c),e,r);return i.concat(u)}for(;r.length=0;le--){var Ae=Q.definition[le],T={idx:p,def:Ae.definition.concat((0,Ut.drop)(h)),ruleStack:m,occurrenceStack:y};g.push(T),g.push(o)}else if(Q instanceof Dt.Alternative)g.push({idx:p,def:Q.definition.concat((0,Ut.drop)(h)),ruleStack:m,occurrenceStack:y});else if(Q instanceof Dt.Rule)g.push(aye(Q,p,m,y));else throw Error("non exhaustive match")}}return u}Mr.nextPossibleTokensAfter=Aye;function aye(t,e,r,i){var n=(0,Ut.cloneArr)(r);n.push(t.name);var s=(0,Ut.cloneArr)(i);return s.push(1),{idx:e,def:t.definition,ruleStack:n,occurrenceStack:s}}});var Up=w(tr=>{"use strict";var sG=tr&&tr.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new 
i)}}();Object.defineProperty(tr,"__esModule",{value:!0});tr.areTokenCategoriesNotUsed=tr.isStrictPrefixOfPath=tr.containsPath=tr.getLookaheadPathsForOptionalProd=tr.getLookaheadPathsForOr=tr.lookAheadSequenceFromAlternatives=tr.buildSingleAlternativeLookaheadFunction=tr.buildAlternativesLookAheadFunc=tr.buildLookaheadFuncForOptionalProd=tr.buildLookaheadFuncForOr=tr.getProdType=tr.PROD_TYPE=void 0;var cr=Yt(),oG=Mp(),lye=zI(),ey=fg(),zA=bn(),cye=hg(),ci;(function(t){t[t.OPTION=0]="OPTION",t[t.REPETITION=1]="REPETITION",t[t.REPETITION_MANDATORY=2]="REPETITION_MANDATORY",t[t.REPETITION_MANDATORY_WITH_SEPARATOR=3]="REPETITION_MANDATORY_WITH_SEPARATOR",t[t.REPETITION_WITH_SEPARATOR=4]="REPETITION_WITH_SEPARATOR",t[t.ALTERNATION=5]="ALTERNATION"})(ci=tr.PROD_TYPE||(tr.PROD_TYPE={}));function uye(t){if(t instanceof zA.Option)return ci.OPTION;if(t instanceof zA.Repetition)return ci.REPETITION;if(t instanceof zA.RepetitionMandatory)return ci.REPETITION_MANDATORY;if(t instanceof zA.RepetitionMandatoryWithSeparator)return ci.REPETITION_MANDATORY_WITH_SEPARATOR;if(t instanceof zA.RepetitionWithSeparator)return ci.REPETITION_WITH_SEPARATOR;if(t instanceof zA.Alternation)return ci.ALTERNATION;throw Error("non exhaustive match")}tr.getProdType=uye;function gye(t,e,r,i,n,s){var o=aG(t,e,r),a=Wv(o)?ey.tokenStructuredMatcherNoCategories:ey.tokenStructuredMatcher;return s(o,i,a,n)}tr.buildLookaheadFuncForOr=gye;function fye(t,e,r,i,n,s){var o=AG(t,e,n,r),a=Wv(o)?ey.tokenStructuredMatcherNoCategories:ey.tokenStructuredMatcher;return s(o[0],a,i)}tr.buildLookaheadFuncForOptionalProd=fye;function hye(t,e,r,i){var n=t.length,s=(0,cr.every)(t,function(l){return(0,cr.every)(l,function(c){return c.length===1})});if(e)return function(l){for(var c=(0,cr.map)(l,function(x){return x.GATE}),u=0;u{"use strict";var Vv=Xt&&Xt.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in 
n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Xt,"__esModule",{value:!0});Xt.checkPrefixAlternativesAmbiguities=Xt.validateSomeNonEmptyLookaheadPath=Xt.validateTooManyAlts=Xt.RepetionCollector=Xt.validateAmbiguousAlternationAlternatives=Xt.validateEmptyOrAlternative=Xt.getFirstNoneTerminal=Xt.validateNoLeftRecursion=Xt.validateRuleIsOverridden=Xt.validateRuleDoesNotAlreadyExist=Xt.OccurrenceValidationCollector=Xt.identifyProductionForDuplicates=Xt.validateGrammar=void 0;var nr=Yt(),xr=Yt(),Uo=Xn(),Xv=Lp(),dg=Up(),Eye=Mp(),Ao=bn(),Zv=hg();function wye(t,e,r,i,n){var s=nr.map(t,function(h){return Iye(h,i)}),o=nr.map(t,function(h){return $v(h,h,i)}),a=[],l=[],c=[];(0,xr.every)(o,xr.isEmpty)&&(a=(0,xr.map)(t,function(h){return fG(h,i)}),l=(0,xr.map)(t,function(h){return hG(h,e,i)}),c=dG(t,e,i));var u=yye(t,r,i),g=(0,xr.map)(t,function(h){return pG(h,i)}),f=(0,xr.map)(t,function(h){return gG(h,t,n,i)});return nr.flatten(s.concat(c,o,a,l,u,g,f))}Xt.validateGrammar=wye;function Iye(t,e){var r=new EG;t.accept(r);var i=r.allProductions,n=nr.groupBy(i,CG),s=nr.pick(n,function(a){return a.length>1}),o=nr.map(nr.values(s),function(a){var l=nr.first(a),c=e.buildDuplicateFoundError(t,a),u=(0,Xv.getProductionDslName)(l),g={message:c,type:Uo.ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS,ruleName:t.name,dslName:u,occurrence:l.idx},f=mG(l);return f&&(g.parameter=f),g});return o}function CG(t){return(0,Xv.getProductionDslName)(t)+"_#_"+t.idx+"_#_"+mG(t)}Xt.identifyProductionForDuplicates=CG;function mG(t){return t instanceof Ao.Terminal?t.terminalType.name:t instanceof Ao.NonTerminal?t.nonTerminalName:""}var EG=function(t){Vv(e,t);function e(){var 
r=t!==null&&t.apply(this,arguments)||this;return r.allProductions=[],r}return e.prototype.visitNonTerminal=function(r){this.allProductions.push(r)},e.prototype.visitOption=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetition=function(r){this.allProductions.push(r)},e.prototype.visitAlternation=function(r){this.allProductions.push(r)},e.prototype.visitTerminal=function(r){this.allProductions.push(r)},e}(Zv.GAstVisitor);Xt.OccurrenceValidationCollector=EG;function gG(t,e,r,i){var n=[],s=(0,xr.reduce)(e,function(a,l){return l.name===t.name?a+1:a},0);if(s>1){var o=i.buildDuplicateRuleNameError({topLevelRule:t,grammarName:r});n.push({message:o,type:Uo.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:t.name})}return n}Xt.validateRuleDoesNotAlreadyExist=gG;function Bye(t,e,r){var i=[],n;return nr.contains(e,t)||(n="Invalid rule override, rule: ->"+t+"<- cannot be overridden in the grammar: ->"+r+"<-as it is not defined in any of the super grammars ",i.push({message:n,type:Uo.ParserDefinitionErrorType.INVALID_RULE_OVERRIDE,ruleName:t})),i}Xt.validateRuleIsOverridden=Bye;function $v(t,e,r,i){i===void 0&&(i=[]);var n=[],s=Kp(e.definition);if(nr.isEmpty(s))return[];var o=t.name,a=nr.contains(s,t);a&&n.push({message:r.buildLeftRecursionError({topLevelRule:t,leftRecursionPath:i}),type:Uo.ParserDefinitionErrorType.LEFT_RECURSION,ruleName:o});var l=nr.difference(s,i.concat([t])),c=nr.map(l,function(u){var g=nr.cloneArr(i);return g.push(u),$v(t,u,r,g)});return n.concat(nr.flatten(c))}Xt.validateNoLeftRecursion=$v;function Kp(t){var e=[];if(nr.isEmpty(t))return e;var r=nr.first(t);if(r instanceof Ao.NonTerminal)e.push(r.referencedRule);else if(r instanceof Ao.Alternative||r instanceof Ao.Option||r instanceof 
Ao.RepetitionMandatory||r instanceof Ao.RepetitionMandatoryWithSeparator||r instanceof Ao.RepetitionWithSeparator||r instanceof Ao.Repetition)e=e.concat(Kp(r.definition));else if(r instanceof Ao.Alternation)e=nr.flatten(nr.map(r.definition,function(o){return Kp(o.definition)}));else if(!(r instanceof Ao.Terminal))throw Error("non exhaustive match");var i=(0,Xv.isOptionalProd)(r),n=t.length>1;if(i&&n){var s=nr.drop(t);return e.concat(Kp(s))}else return e}Xt.getFirstNoneTerminal=Kp;var eS=function(t){Vv(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return r.alternations=[],r}return e.prototype.visitAlternation=function(r){this.alternations.push(r)},e}(Zv.GAstVisitor);function fG(t,e){var r=new eS;t.accept(r);var i=r.alternations,n=nr.reduce(i,function(s,o){var a=nr.dropRight(o.definition),l=nr.map(a,function(c,u){var g=(0,Eye.nextPossibleTokensAfter)([c],[],null,1);return nr.isEmpty(g)?{message:e.buildEmptyAlternationError({topLevelRule:t,alternation:o,emptyChoiceIdx:u}),type:Uo.ParserDefinitionErrorType.NONE_LAST_EMPTY_ALT,ruleName:t.name,occurrence:o.idx,alternative:u+1}:null});return s.concat(nr.compact(l))},[]);return n}Xt.validateEmptyOrAlternative=fG;function hG(t,e,r){var i=new eS;t.accept(i);var n=i.alternations;n=(0,xr.reject)(n,function(o){return o.ignoreAmbiguities===!0});var s=nr.reduce(n,function(o,a){var l=a.idx,c=a.maxLookahead||e,u=(0,dg.getLookaheadPathsForOr)(l,t,c,a),g=bye(u,a,t,r),f=IG(u,a,t,r);return o.concat(g,f)},[]);return s}Xt.validateAmbiguousAlternationAlternatives=hG;var yG=function(t){Vv(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return r.allProductions=[],r}return 
e.prototype.visitRepetitionWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetition=function(r){this.allProductions.push(r)},e}(Zv.GAstVisitor);Xt.RepetionCollector=yG;function pG(t,e){var r=new eS;t.accept(r);var i=r.alternations,n=nr.reduce(i,function(s,o){return o.definition.length>255&&s.push({message:e.buildTooManyAlternativesError({topLevelRule:t,alternation:o}),type:Uo.ParserDefinitionErrorType.TOO_MANY_ALTS,ruleName:t.name,occurrence:o.idx}),s},[]);return n}Xt.validateTooManyAlts=pG;function dG(t,e,r){var i=[];return(0,xr.forEach)(t,function(n){var s=new yG;n.accept(s);var o=s.allProductions;(0,xr.forEach)(o,function(a){var l=(0,dg.getProdType)(a),c=a.maxLookahead||e,u=a.idx,g=(0,dg.getLookaheadPathsForOptionalProd)(u,n,l,c),f=g[0];if((0,xr.isEmpty)((0,xr.flatten)(f))){var h=r.buildEmptyRepetitionError({topLevelRule:n,repetition:a});i.push({message:h,type:Uo.ParserDefinitionErrorType.NO_NON_EMPTY_LOOKAHEAD,ruleName:n.name})}})}),i}Xt.validateSomeNonEmptyLookaheadPath=dG;function bye(t,e,r,i){var n=[],s=(0,xr.reduce)(t,function(a,l,c){return e.definition[c].ignoreAmbiguities===!0||(0,xr.forEach)(l,function(u){var g=[c];(0,xr.forEach)(t,function(f,h){c!==h&&(0,dg.containsPath)(f,u)&&e.definition[h].ignoreAmbiguities!==!0&&g.push(h)}),g.length>1&&!(0,dg.containsPath)(n,u)&&(n.push(u),a.push({alts:g,path:u}))}),a},[]),o=nr.map(s,function(a){var l=(0,xr.map)(a.alts,function(u){return u+1}),c=i.buildAlternationAmbiguityError({topLevelRule:r,alternation:e,ambiguityIndices:l,prefixPath:a.path});return{message:c,type:Uo.ParserDefinitionErrorType.AMBIGUOUS_ALTS,ruleName:r.name,occurrence:e.idx,alternatives:[a.alts]}});return o}function IG(t,e,r,i){var n=[],s=(0,xr.reduce)(t,function(o,a,l){var c=(0,xr.map)(a,function(u){return{idx:l,path:u}});return 
o.concat(c)},[]);return(0,xr.forEach)(s,function(o){var a=e.definition[o.idx];if(a.ignoreAmbiguities!==!0){var l=o.idx,c=o.path,u=(0,xr.findAll)(s,function(f){return e.definition[f.idx].ignoreAmbiguities!==!0&&f.idx{"use strict";Object.defineProperty(Cg,"__esModule",{value:!0});Cg.validateGrammar=Cg.resolveGrammar=void 0;var rS=Yt(),Qye=tG(),vye=tS(),wG=Tp();function Sye(t){t=(0,rS.defaults)(t,{errMsgProvider:wG.defaultGrammarResolverErrorProvider});var e={};return(0,rS.forEach)(t.rules,function(r){e[r.name]=r}),(0,Qye.resolveGrammar)(e,t.errMsgProvider)}Cg.resolveGrammar=Sye;function kye(t){return t=(0,rS.defaults)(t,{errMsgProvider:wG.defaultGrammarValidatorErrorProvider}),(0,vye.validateGrammar)(t.rules,t.maxLookahead,t.tokenTypes,t.errMsgProvider,t.grammarName)}Cg.validateGrammar=kye});var mg=w(vn=>{"use strict";var Hp=vn&&vn.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!="function"&&r!==null)throw new TypeError("Class extends value "+String(r)+" is not a constructor or null");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(vn,"__esModule",{value:!0});vn.EarlyExitException=vn.NotAllInputParsedException=vn.NoViableAltException=vn.MismatchedTokenException=vn.isRecognitionException=void 0;var xye=Yt(),bG="MismatchedTokenException",QG="NoViableAltException",vG="EarlyExitException",SG="NotAllInputParsedException",kG=[bG,QG,vG,SG];Object.freeze(kG);function Pye(t){return(0,xye.contains)(kG,t.name)}vn.isRecognitionException=Pye;var ty=function(t){Hp(e,t);function e(r,i){var n=this.constructor,s=t.call(this,r)||this;return s.token=i,s.resyncedTokens=[],Object.setPrototypeOf(s,n.prototype),Error.captureStackTrace&&Error.captureStackTrace(s,s.constructor),s}return 
e}(Error),Dye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=bG,s}return e}(ty);vn.MismatchedTokenException=Dye;var Rye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=QG,s}return e}(ty);vn.NoViableAltException=Rye;var Fye=function(t){Hp(e,t);function e(r,i){var n=t.call(this,r,i)||this;return n.name=SG,n}return e}(ty);vn.NotAllInputParsedException=Fye;var Nye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=vG,s}return e}(ty);vn.EarlyExitException=Nye});var nS=w(Yi=>{"use strict";Object.defineProperty(Yi,"__esModule",{value:!0});Yi.attemptInRepetitionRecovery=Yi.Recoverable=Yi.InRuleRecoveryException=Yi.IN_RULE_RECOVERY_EXCEPTION=Yi.EOF_FOLLOW_KEY=void 0;var ry=JA(),vs=Yt(),Lye=mg(),Tye=qv(),Oye=Xn();Yi.EOF_FOLLOW_KEY={};Yi.IN_RULE_RECOVERY_EXCEPTION="InRuleRecoveryException";function iS(t){this.name=Yi.IN_RULE_RECOVERY_EXCEPTION,this.message=t}Yi.InRuleRecoveryException=iS;iS.prototype=Error.prototype;var Mye=function(){function t(){}return t.prototype.initRecoverable=function(e){this.firstAfterRepMap={},this.resyncFollows={},this.recoveryEnabled=(0,vs.has)(e,"recoveryEnabled")?e.recoveryEnabled:Oye.DEFAULT_PARSER_CONFIG.recoveryEnabled,this.recoveryEnabled&&(this.attemptInRepetitionRecovery=xG)},t.prototype.getTokenToInsert=function(e){var r=(0,ry.createTokenInstance)(e,"",NaN,NaN,NaN,NaN,NaN,NaN);return r.isInsertedInRecovery=!0,r},t.prototype.canTokenTypeBeInsertedInRecovery=function(e){return!0},t.prototype.tryInRepetitionRecovery=function(e,r,i,n){for(var s=this,o=this.findReSyncTokenType(),a=this.exportLexerState(),l=[],c=!1,u=this.LA(1),g=this.LA(1),f=function(){var h=s.LA(0),p=s.errorMessageProvider.buildMismatchTokenMessage({expected:n,actual:u,previous:h,ruleName:s.getCurrRuleFullName()}),m=new 
Lye.MismatchedTokenException(p,u,s.LA(0));m.resyncedTokens=(0,vs.dropRight)(l),s.SAVE_ERROR(m)};!c;)if(this.tokenMatcher(g,n)){f();return}else if(i.call(this)){f(),e.apply(this,r);return}else this.tokenMatcher(g,o)?c=!0:(g=this.SKIP_TOKEN(),this.addToResyncTokens(g,l));this.importLexerState(a)},t.prototype.shouldInRepetitionRecoveryBeTried=function(e,r,i){return!(i===!1||e===void 0||r===void 0||this.tokenMatcher(this.LA(1),e)||this.isBackTracking()||this.canPerformInRuleRecovery(e,this.getFollowsForInRuleRecovery(e,r)))},t.prototype.getFollowsForInRuleRecovery=function(e,r){var i=this.getCurrentGrammarPath(e,r),n=this.getNextPossibleTokenTypes(i);return n},t.prototype.tryInRuleRecovery=function(e,r){if(this.canRecoverWithSingleTokenInsertion(e,r)){var i=this.getTokenToInsert(e);return i}if(this.canRecoverWithSingleTokenDeletion(e)){var n=this.SKIP_TOKEN();return this.consumeToken(),n}throw new iS("sad sad panda")},t.prototype.canPerformInRuleRecovery=function(e,r){return this.canRecoverWithSingleTokenInsertion(e,r)||this.canRecoverWithSingleTokenDeletion(e)},t.prototype.canRecoverWithSingleTokenInsertion=function(e,r){var i=this;if(!this.canTokenTypeBeInsertedInRecovery(e)||(0,vs.isEmpty)(r))return!1;var n=this.LA(1),s=(0,vs.find)(r,function(o){return i.tokenMatcher(n,o)})!==void 0;return s},t.prototype.canRecoverWithSingleTokenDeletion=function(e){var r=this.tokenMatcher(this.LA(2),e);return r},t.prototype.isInCurrentRuleReSyncSet=function(e){var r=this.getCurrFollowKey(),i=this.getFollowSetFromFollowKey(r);return(0,vs.contains)(i,e)},t.prototype.findReSyncTokenType=function(){for(var e=this.flattenFollowSet(),r=this.LA(1),i=2;;){var n=r.tokenType;if((0,vs.contains)(e,n))return n;r=this.LA(i),i++}},t.prototype.getCurrFollowKey=function(){if(this.RULE_STACK.length===1)return Yi.EOF_FOLLOW_KEY;var 
e=this.getLastExplicitRuleShortName(),r=this.getLastExplicitRuleOccurrenceIndex(),i=this.getPreviousExplicitRuleShortName();return{ruleName:this.shortRuleNameToFullName(e),idxInCallingRule:r,inRule:this.shortRuleNameToFullName(i)}},t.prototype.buildFullFollowKeyStack=function(){var e=this,r=this.RULE_STACK,i=this.RULE_OCCURRENCE_STACK;return(0,vs.map)(r,function(n,s){return s===0?Yi.EOF_FOLLOW_KEY:{ruleName:e.shortRuleNameToFullName(n),idxInCallingRule:i[s],inRule:e.shortRuleNameToFullName(r[s-1])}})},t.prototype.flattenFollowSet=function(){var e=this,r=(0,vs.map)(this.buildFullFollowKeyStack(),function(i){return e.getFollowSetFromFollowKey(i)});return(0,vs.flatten)(r)},t.prototype.getFollowSetFromFollowKey=function(e){if(e===Yi.EOF_FOLLOW_KEY)return[ry.EOF];var r=e.ruleName+e.idxInCallingRule+Tye.IN+e.inRule;return this.resyncFollows[r]},t.prototype.addToResyncTokens=function(e,r){return this.tokenMatcher(e,ry.EOF)||r.push(e),r},t.prototype.reSyncTo=function(e){for(var r=[],i=this.LA(1);this.tokenMatcher(i,e)===!1;)i=this.SKIP_TOKEN(),this.addToResyncTokens(i,r);return(0,vs.dropRight)(r)},t.prototype.attemptInRepetitionRecovery=function(e,r,i,n,s,o,a){},t.prototype.getCurrentGrammarPath=function(e,r){var i=this.getHumanReadableRuleStack(),n=(0,vs.cloneArr)(this.RULE_OCCURRENCE_STACK),s={ruleStack:i,occurrenceStack:n,lastTok:e,lastTokOccurrence:r};return s},t.prototype.getHumanReadableRuleStack=function(){var e=this;return(0,vs.map)(this.RULE_STACK,function(r){return e.shortRuleNameToFullName(r)})},t}();Yi.Recoverable=Mye;function xG(t,e,r,i,n,s,o){var a=this.getKeyForAutomaticLookahead(i,n),l=this.firstAfterRepMap[a];if(l===void 0){var c=this.getCurrRuleFullName(),u=this.getGAstProductions()[c],g=new s(u,n);l=g.startWalking(),this.firstAfterRepMap[a]=l}var f=l.token,h=l.occurrence,p=l.isEndOfRule;this.RULE_STACK.length===1&&p&&f===void 
0&&(f=ry.EOF,h=1),this.shouldInRepetitionRecoveryBeTried(f,h,o)&&this.tryInRepetitionRecovery(t,e,r,f)}Yi.attemptInRepetitionRecovery=xG});var iy=w(Jt=>{"use strict";Object.defineProperty(Jt,"__esModule",{value:!0});Jt.getKeyForAutomaticLookahead=Jt.AT_LEAST_ONE_SEP_IDX=Jt.MANY_SEP_IDX=Jt.AT_LEAST_ONE_IDX=Jt.MANY_IDX=Jt.OPTION_IDX=Jt.OR_IDX=Jt.BITS_FOR_ALT_IDX=Jt.BITS_FOR_RULE_IDX=Jt.BITS_FOR_OCCURRENCE_IDX=Jt.BITS_FOR_METHOD_TYPE=void 0;Jt.BITS_FOR_METHOD_TYPE=4;Jt.BITS_FOR_OCCURRENCE_IDX=8;Jt.BITS_FOR_RULE_IDX=12;Jt.BITS_FOR_ALT_IDX=8;Jt.OR_IDX=1<{"use strict";Object.defineProperty(ny,"__esModule",{value:!0});ny.LooksAhead=void 0;var Ha=Up(),lo=Yt(),PG=Xn(),ja=iy(),Qc=Lp(),Kye=function(){function t(){}return t.prototype.initLooksAhead=function(e){this.dynamicTokensEnabled=(0,lo.has)(e,"dynamicTokensEnabled")?e.dynamicTokensEnabled:PG.DEFAULT_PARSER_CONFIG.dynamicTokensEnabled,this.maxLookahead=(0,lo.has)(e,"maxLookahead")?e.maxLookahead:PG.DEFAULT_PARSER_CONFIG.maxLookahead,this.lookAheadFuncsCache=(0,lo.isES2015MapSupported)()?new Map:[],(0,lo.isES2015MapSupported)()?(this.getLaFuncFromCache=this.getLaFuncFromMap,this.setLaFuncCache=this.setLaFuncCacheUsingMap):(this.getLaFuncFromCache=this.getLaFuncFromObj,this.setLaFuncCache=this.setLaFuncUsingObj)},t.prototype.preComputeLookaheadFunctions=function(e){var r=this;(0,lo.forEach)(e,function(i){r.TRACE_INIT(i.name+" Rule Lookahead",function(){var n=(0,Qc.collectMethods)(i),s=n.alternation,o=n.repetition,a=n.option,l=n.repetitionMandatory,c=n.repetitionMandatoryWithSeparator,u=n.repetitionWithSeparator;(0,lo.forEach)(s,function(g){var f=g.idx===0?"":g.idx;r.TRACE_INIT(""+(0,Qc.getProductionDslName)(g)+f,function(){var 
h=(0,Ha.buildLookaheadFuncForOr)(g.idx,i,g.maxLookahead||r.maxLookahead,g.hasPredicates,r.dynamicTokensEnabled,r.lookAheadBuilderForAlternatives),p=(0,ja.getKeyForAutomaticLookahead)(r.fullRuleNameToShort[i.name],ja.OR_IDX,g.idx);r.setLaFuncCache(p,h)})}),(0,lo.forEach)(o,function(g){r.computeLookaheadFunc(i,g.idx,ja.MANY_IDX,Ha.PROD_TYPE.REPETITION,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(a,function(g){r.computeLookaheadFunc(i,g.idx,ja.OPTION_IDX,Ha.PROD_TYPE.OPTION,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(l,function(g){r.computeLookaheadFunc(i,g.idx,ja.AT_LEAST_ONE_IDX,Ha.PROD_TYPE.REPETITION_MANDATORY,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(c,function(g){r.computeLookaheadFunc(i,g.idx,ja.AT_LEAST_ONE_SEP_IDX,Ha.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(u,function(g){r.computeLookaheadFunc(i,g.idx,ja.MANY_SEP_IDX,Ha.PROD_TYPE.REPETITION_WITH_SEPARATOR,g.maxLookahead,(0,Qc.getProductionDslName)(g))})})})},t.prototype.computeLookaheadFunc=function(e,r,i,n,s,o){var a=this;this.TRACE_INIT(""+o+(r===0?"":r),function(){var l=(0,Ha.buildLookaheadFuncForOptionalProd)(r,e,s||a.maxLookahead,a.dynamicTokensEnabled,n,a.lookAheadBuilderForOptional),c=(0,ja.getKeyForAutomaticLookahead)(a.fullRuleNameToShort[e.name],i,r);a.setLaFuncCache(c,l)})},t.prototype.lookAheadBuilderForOptional=function(e,r,i){return(0,Ha.buildSingleAlternativeLookaheadFunction)(e,r,i)},t.prototype.lookAheadBuilderForAlternatives=function(e,r,i,n){return(0,Ha.buildAlternativesLookAheadFunc)(e,r,i,n)},t.prototype.getKeyForAutomaticLookahead=function(e,r){var i=this.getLastExplicitRuleShortName();return(0,ja.getKeyForAutomaticLookahead)(i,e,r)},t.prototype.getLaFuncFromCache=function(e){},t.prototype.getLaFuncFromMap=function(e){return this.lookAheadFuncsCache.get(e)},t.prototype.getLaFuncFromObj=function(e){return 
this.lookAheadFuncsCache[e]},t.prototype.setLaFuncCache=function(e,r){},t.prototype.setLaFuncCacheUsingMap=function(e,r){this.lookAheadFuncsCache.set(e,r)},t.prototype.setLaFuncUsingObj=function(e,r){this.lookAheadFuncsCache[e]=r},t}();ny.LooksAhead=Kye});var RG=w(Ko=>{"use strict";Object.defineProperty(Ko,"__esModule",{value:!0});Ko.addNoneTerminalToCst=Ko.addTerminalToCst=Ko.setNodeLocationFull=Ko.setNodeLocationOnlyOffset=void 0;function Hye(t,e){isNaN(t.startOffset)===!0?(t.startOffset=e.startOffset,t.endOffset=e.endOffset):t.endOffset{"use strict";Object.defineProperty(_A,"__esModule",{value:!0});_A.defineNameProp=_A.functionName=_A.classNameFromInstance=void 0;var qye=Yt();function Jye(t){return FG(t.constructor)}_A.classNameFromInstance=Jye;var NG="name";function FG(t){var e=t.name;return e||"anonymous"}_A.functionName=FG;function Wye(t,e){var r=Object.getOwnPropertyDescriptor(t,NG);return(0,qye.isUndefined)(r)||r.configurable?(Object.defineProperty(t,NG,{enumerable:!1,configurable:!0,writable:!1,value:e}),!0):!1}_A.defineNameProp=Wye});var UG=w(Pi=>{"use strict";Object.defineProperty(Pi,"__esModule",{value:!0});Pi.validateRedundantMethods=Pi.validateMissingCstMethods=Pi.validateVisitor=Pi.CstVisitorDefinitionError=Pi.createBaseVisitorConstructorWithDefaults=Pi.createBaseSemanticVisitorConstructor=Pi.defaultVisit=void 0;var Ss=Yt(),jp=sS();function LG(t,e){for(var r=(0,Ss.keys)(t),i=r.length,n=0;n:
+ `+(""+s.join(`
+
+`).replace(/\n/g,`
+ `)))}}};return r.prototype=i,r.prototype.constructor=r,r._RULE_NAMES=e,r}Pi.createBaseSemanticVisitorConstructor=zye;function _ye(t,e,r){var i=function(){};(0,jp.defineNameProp)(i,t+"BaseSemanticsWithDefaults");var n=Object.create(r.prototype);return(0,Ss.forEach)(e,function(s){n[s]=LG}),i.prototype=n,i.prototype.constructor=i,i}Pi.createBaseVisitorConstructorWithDefaults=_ye;var oS;(function(t){t[t.REDUNDANT_METHOD=0]="REDUNDANT_METHOD",t[t.MISSING_METHOD=1]="MISSING_METHOD"})(oS=Pi.CstVisitorDefinitionError||(Pi.CstVisitorDefinitionError={}));function TG(t,e){var r=OG(t,e),i=MG(t,e);return r.concat(i)}Pi.validateVisitor=TG;function OG(t,e){var r=(0,Ss.map)(e,function(i){if(!(0,Ss.isFunction)(t[i]))return{msg:"Missing visitor method: <"+i+"> on "+(0,jp.functionName)(t.constructor)+" CST Visitor.",type:oS.MISSING_METHOD,methodName:i}});return(0,Ss.compact)(r)}Pi.validateMissingCstMethods=OG;var Vye=["constructor","visit","validateVisitor"];function MG(t,e){var r=[];for(var i in t)(0,Ss.isFunction)(t[i])&&!(0,Ss.contains)(Vye,i)&&!(0,Ss.contains)(e,i)&&r.push({msg:"Redundant visitor method: <"+i+"> on "+(0,jp.functionName)(t.constructor)+` CST Visitor
+There is no Grammar Rule corresponding to this method's name.
+`,type:oS.REDUNDANT_METHOD,methodName:i});return r}Pi.validateRedundantMethods=MG});var HG=w(sy=>{"use strict";Object.defineProperty(sy,"__esModule",{value:!0});sy.TreeBuilder=void 0;var Eg=RG(),ii=Yt(),KG=UG(),Xye=Xn(),Zye=function(){function t(){}return t.prototype.initTreeBuilder=function(e){if(this.CST_STACK=[],this.outputCst=e.outputCst,this.nodeLocationTracking=(0,ii.has)(e,"nodeLocationTracking")?e.nodeLocationTracking:Xye.DEFAULT_PARSER_CONFIG.nodeLocationTracking,!this.outputCst)this.cstInvocationStateUpdate=ii.NOOP,this.cstFinallyStateUpdate=ii.NOOP,this.cstPostTerminal=ii.NOOP,this.cstPostNonTerminal=ii.NOOP,this.cstPostRule=ii.NOOP;else if(/full/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=Eg.setNodeLocationFull,this.setNodeLocationFromNode=Eg.setNodeLocationFull,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationFullRecovery):(this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=this.cstPostRuleFull,this.setInitialNodeLocation=this.setInitialNodeLocationFullRegular);else if(/onlyOffset/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=Eg.setNodeLocationOnlyOffset,this.setNodeLocationFromNode=Eg.setNodeLocationOnlyOffset,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRecovery):(this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=this.cstPostRuleOnlyOffset,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRegular);else if(/none/i.test(this.nodeLocationTracking))this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=ii.NOOP;else throw Error('Invalid config option: 
"'+e.nodeLocationTracking+'"')},t.prototype.setInitialNodeLocationOnlyOffsetRecovery=function(e){e.location={startOffset:NaN,endOffset:NaN}},t.prototype.setInitialNodeLocationOnlyOffsetRegular=function(e){e.location={startOffset:this.LA(1).startOffset,endOffset:NaN}},t.prototype.setInitialNodeLocationFullRecovery=function(e){e.location={startOffset:NaN,startLine:NaN,startColumn:NaN,endOffset:NaN,endLine:NaN,endColumn:NaN}},t.prototype.setInitialNodeLocationFullRegular=function(e){var r=this.LA(1);e.location={startOffset:r.startOffset,startLine:r.startLine,startColumn:r.startColumn,endOffset:NaN,endLine:NaN,endColumn:NaN}},t.prototype.cstInvocationStateUpdate=function(e,r){var i={name:e,children:{}};this.setInitialNodeLocation(i),this.CST_STACK.push(i)},t.prototype.cstFinallyStateUpdate=function(){this.CST_STACK.pop()},t.prototype.cstPostRuleFull=function(e){var r=this.LA(0),i=e.location;i.startOffset<=r.startOffset?(i.endOffset=r.endOffset,i.endLine=r.endLine,i.endColumn=r.endColumn):(i.startOffset=NaN,i.startLine=NaN,i.startColumn=NaN)},t.prototype.cstPostRuleOnlyOffset=function(e){var r=this.LA(0),i=e.location;i.startOffset<=r.startOffset?i.endOffset=r.endOffset:i.startOffset=NaN},t.prototype.cstPostTerminal=function(e,r){var i=this.CST_STACK[this.CST_STACK.length-1];(0,Eg.addTerminalToCst)(i,r,e),this.setNodeLocationFromToken(i.location,r)},t.prototype.cstPostNonTerminal=function(e,r){var i=this.CST_STACK[this.CST_STACK.length-1];(0,Eg.addNoneTerminalToCst)(i,r,e),this.setNodeLocationFromNode(i.location,e.location)},t.prototype.getBaseCstVisitorConstructor=function(){if((0,ii.isUndefined)(this.baseCstVisitorConstructor)){var e=(0,KG.createBaseSemanticVisitorConstructor)(this.className,(0,ii.keys)(this.gastProductionsCache));return this.baseCstVisitorConstructor=e,e}return this.baseCstVisitorConstructor},t.prototype.getBaseCstVisitorConstructorWithDefaults=function(){if((0,ii.isUndefined)(this.baseCstVisitorWithDefaultsConstructor)){var 
e=(0,KG.createBaseVisitorConstructorWithDefaults)(this.className,(0,ii.keys)(this.gastProductionsCache),this.getBaseCstVisitorConstructor());return this.baseCstVisitorWithDefaultsConstructor=e,e}return this.baseCstVisitorWithDefaultsConstructor},t.prototype.getLastExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-1]},t.prototype.getPreviousExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-2]},t.prototype.getLastExplicitRuleOccurrenceIndex=function(){var e=this.RULE_OCCURRENCE_STACK;return e[e.length-1]},t}();sy.TreeBuilder=Zye});var GG=w(oy=>{"use strict";Object.defineProperty(oy,"__esModule",{value:!0});oy.LexerAdapter=void 0;var jG=Xn(),$ye=function(){function t(){}return t.prototype.initLexerAdapter=function(){this.tokVector=[],this.tokVectorLength=0,this.currIdx=-1},Object.defineProperty(t.prototype,"input",{get:function(){return this.tokVector},set:function(e){if(this.selfAnalysisDone!==!0)throw Error("Missing invocation at the end of the Parser's constructor.");this.reset(),this.tokVector=e,this.tokVectorLength=e.length},enumerable:!1,configurable:!0}),t.prototype.SKIP_TOKEN=function(){return this.currIdx<=this.tokVector.length-2?(this.consumeToken(),this.LA(1)):jG.END_OF_FILE},t.prototype.LA=function(e){var r=this.currIdx+e;return r<0||this.tokVectorLength<=r?jG.END_OF_FILE:this.tokVector[r]},t.prototype.consumeToken=function(){this.currIdx++},t.prototype.exportLexerState=function(){return this.currIdx},t.prototype.importLexerState=function(e){this.currIdx=e},t.prototype.resetLexerState=function(){this.currIdx=-1},t.prototype.moveToTerminatedState=function(){this.currIdx=this.tokVector.length-1},t.prototype.getLexerPosition=function(){return this.exportLexerState()},t}();oy.LexerAdapter=$ye});var qG=w(ay=>{"use strict";Object.defineProperty(ay,"__esModule",{value:!0});ay.RecognizerApi=void 0;var YG=Yt(),ewe=mg(),aS=Xn(),twe=Tp(),rwe=tS(),iwe=bn(),nwe=function(){function t(){}return 
t.prototype.ACTION=function(e){return e.call(this)},t.prototype.consume=function(e,r,i){return this.consumeInternal(r,e,i)},t.prototype.subrule=function(e,r,i){return this.subruleInternal(r,e,i)},t.prototype.option=function(e,r){return this.optionInternal(r,e)},t.prototype.or=function(e,r){return this.orInternal(r,e)},t.prototype.many=function(e,r){return this.manyInternal(e,r)},t.prototype.atLeastOne=function(e,r){return this.atLeastOneInternal(e,r)},t.prototype.CONSUME=function(e,r){return this.consumeInternal(e,0,r)},t.prototype.CONSUME1=function(e,r){return this.consumeInternal(e,1,r)},t.prototype.CONSUME2=function(e,r){return this.consumeInternal(e,2,r)},t.prototype.CONSUME3=function(e,r){return this.consumeInternal(e,3,r)},t.prototype.CONSUME4=function(e,r){return this.consumeInternal(e,4,r)},t.prototype.CONSUME5=function(e,r){return this.consumeInternal(e,5,r)},t.prototype.CONSUME6=function(e,r){return this.consumeInternal(e,6,r)},t.prototype.CONSUME7=function(e,r){return this.consumeInternal(e,7,r)},t.prototype.CONSUME8=function(e,r){return this.consumeInternal(e,8,r)},t.prototype.CONSUME9=function(e,r){return this.consumeInternal(e,9,r)},t.prototype.SUBRULE=function(e,r){return this.subruleInternal(e,0,r)},t.prototype.SUBRULE1=function(e,r){return this.subruleInternal(e,1,r)},t.prototype.SUBRULE2=function(e,r){return this.subruleInternal(e,2,r)},t.prototype.SUBRULE3=function(e,r){return this.subruleInternal(e,3,r)},t.prototype.SUBRULE4=function(e,r){return this.subruleInternal(e,4,r)},t.prototype.SUBRULE5=function(e,r){return this.subruleInternal(e,5,r)},t.prototype.SUBRULE6=function(e,r){return this.subruleInternal(e,6,r)},t.prototype.SUBRULE7=function(e,r){return this.subruleInternal(e,7,r)},t.prototype.SUBRULE8=function(e,r){return this.subruleInternal(e,8,r)},t.prototype.SUBRULE9=function(e,r){return this.subruleInternal(e,9,r)},t.prototype.OPTION=function(e){return this.optionInternal(e,0)},t.prototype.OPTION1=function(e){return 
this.optionInternal(e,1)},t.prototype.OPTION2=function(e){return this.optionInternal(e,2)},t.prototype.OPTION3=function(e){return this.optionInternal(e,3)},t.prototype.OPTION4=function(e){return this.optionInternal(e,4)},t.prototype.OPTION5=function(e){return this.optionInternal(e,5)},t.prototype.OPTION6=function(e){return this.optionInternal(e,6)},t.prototype.OPTION7=function(e){return this.optionInternal(e,7)},t.prototype.OPTION8=function(e){return this.optionInternal(e,8)},t.prototype.OPTION9=function(e){return this.optionInternal(e,9)},t.prototype.OR=function(e){return this.orInternal(e,0)},t.prototype.OR1=function(e){return this.orInternal(e,1)},t.prototype.OR2=function(e){return this.orInternal(e,2)},t.prototype.OR3=function(e){return this.orInternal(e,3)},t.prototype.OR4=function(e){return this.orInternal(e,4)},t.prototype.OR5=function(e){return this.orInternal(e,5)},t.prototype.OR6=function(e){return this.orInternal(e,6)},t.prototype.OR7=function(e){return this.orInternal(e,7)},t.prototype.OR8=function(e){return this.orInternal(e,8)},t.prototype.OR9=function(e){return 
this.orInternal(e,9)},t.prototype.MANY=function(e){this.manyInternal(0,e)},t.prototype.MANY1=function(e){this.manyInternal(1,e)},t.prototype.MANY2=function(e){this.manyInternal(2,e)},t.prototype.MANY3=function(e){this.manyInternal(3,e)},t.prototype.MANY4=function(e){this.manyInternal(4,e)},t.prototype.MANY5=function(e){this.manyInternal(5,e)},t.prototype.MANY6=function(e){this.manyInternal(6,e)},t.prototype.MANY7=function(e){this.manyInternal(7,e)},t.prototype.MANY8=function(e){this.manyInternal(8,e)},t.prototype.MANY9=function(e){this.manyInternal(9,e)},t.prototype.MANY_SEP=function(e){this.manySepFirstInternal(0,e)},t.prototype.MANY_SEP1=function(e){this.manySepFirstInternal(1,e)},t.prototype.MANY_SEP2=function(e){this.manySepFirstInternal(2,e)},t.prototype.MANY_SEP3=function(e){this.manySepFirstInternal(3,e)},t.prototype.MANY_SEP4=function(e){this.manySepFirstInternal(4,e)},t.prototype.MANY_SEP5=function(e){this.manySepFirstInternal(5,e)},t.prototype.MANY_SEP6=function(e){this.manySepFirstInternal(6,e)},t.prototype.MANY_SEP7=function(e){this.manySepFirstInternal(7,e)},t.prototype.MANY_SEP8=function(e){this.manySepFirstInternal(8,e)},t.prototype.MANY_SEP9=function(e){this.manySepFirstInternal(9,e)},t.prototype.AT_LEAST_ONE=function(e){this.atLeastOneInternal(0,e)},t.prototype.AT_LEAST_ONE1=function(e){return 
this.atLeastOneInternal(1,e)},t.prototype.AT_LEAST_ONE2=function(e){this.atLeastOneInternal(2,e)},t.prototype.AT_LEAST_ONE3=function(e){this.atLeastOneInternal(3,e)},t.prototype.AT_LEAST_ONE4=function(e){this.atLeastOneInternal(4,e)},t.prototype.AT_LEAST_ONE5=function(e){this.atLeastOneInternal(5,e)},t.prototype.AT_LEAST_ONE6=function(e){this.atLeastOneInternal(6,e)},t.prototype.AT_LEAST_ONE7=function(e){this.atLeastOneInternal(7,e)},t.prototype.AT_LEAST_ONE8=function(e){this.atLeastOneInternal(8,e)},t.prototype.AT_LEAST_ONE9=function(e){this.atLeastOneInternal(9,e)},t.prototype.AT_LEAST_ONE_SEP=function(e){this.atLeastOneSepFirstInternal(0,e)},t.prototype.AT_LEAST_ONE_SEP1=function(e){this.atLeastOneSepFirstInternal(1,e)},t.prototype.AT_LEAST_ONE_SEP2=function(e){this.atLeastOneSepFirstInternal(2,e)},t.prototype.AT_LEAST_ONE_SEP3=function(e){this.atLeastOneSepFirstInternal(3,e)},t.prototype.AT_LEAST_ONE_SEP4=function(e){this.atLeastOneSepFirstInternal(4,e)},t.prototype.AT_LEAST_ONE_SEP5=function(e){this.atLeastOneSepFirstInternal(5,e)},t.prototype.AT_LEAST_ONE_SEP6=function(e){this.atLeastOneSepFirstInternal(6,e)},t.prototype.AT_LEAST_ONE_SEP7=function(e){this.atLeastOneSepFirstInternal(7,e)},t.prototype.AT_LEAST_ONE_SEP8=function(e){this.atLeastOneSepFirstInternal(8,e)},t.prototype.AT_LEAST_ONE_SEP9=function(e){this.atLeastOneSepFirstInternal(9,e)},t.prototype.RULE=function(e,r,i){if(i===void 0&&(i=aS.DEFAULT_RULE_CONFIG),(0,YG.contains)(this.definedRulesNames,e)){var n=twe.defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({topLevelRule:e,grammarName:this.className}),s={message:n,type:aS.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:e};this.definitionErrors.push(s)}this.definedRulesNames.push(e);var o=this.defineRule(e,r,i);return this[e]=o,o},t.prototype.OVERRIDE_RULE=function(e,r,i){i===void 0&&(i=aS.DEFAULT_RULE_CONFIG);var 
n=[];n=n.concat((0,rwe.validateRuleIsOverridden)(e,this.definedRulesNames,this.className)),this.definitionErrors=this.definitionErrors.concat(n);var s=this.defineRule(e,r,i);return this[e]=s,s},t.prototype.BACKTRACK=function(e,r){return function(){this.isBackTrackingStack.push(1);var i=this.saveRecogState();try{return e.apply(this,r),!0}catch(n){if((0,ewe.isRecognitionException)(n))return!1;throw n}finally{this.reloadRecogState(i),this.isBackTrackingStack.pop()}}},t.prototype.getGAstProductions=function(){return this.gastProductionsCache},t.prototype.getSerializedGastProductions=function(){return(0,iwe.serializeGrammar)((0,YG.values)(this.gastProductionsCache))},t}();ay.RecognizerApi=nwe});var _G=w(Ay=>{"use strict";Object.defineProperty(Ay,"__esModule",{value:!0});Ay.RecognizerEngine=void 0;var Rr=Yt(),Zn=iy(),ly=mg(),JG=Up(),Ig=Mp(),WG=Xn(),swe=nS(),zG=JA(),Gp=fg(),owe=sS(),awe=function(){function t(){}return t.prototype.initRecognizerEngine=function(e,r){if(this.className=(0,owe.classNameFromInstance)(this),this.shortRuleNameToFull={},this.fullRuleNameToShort={},this.ruleShortNameIdx=256,this.tokenMatcher=Gp.tokenStructuredMatcherNoCategories,this.definedRulesNames=[],this.tokensMap={},this.isBackTrackingStack=[],this.RULE_STACK=[],this.RULE_OCCURRENCE_STACK=[],this.gastProductionsCache={},(0,Rr.has)(r,"serializedGrammar"))throw Error(`The Parser's configuration can no longer contain a property.
+ See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_6-0-0
+ For Further details.`);if((0,Rr.isArray)(e)){if((0,Rr.isEmpty)(e))throw Error(`A Token Vocabulary cannot be empty.
+ Note that the first argument for the parser constructor
+ is no longer a Token vector (since v4.0).`);if(typeof e[0].startOffset=="number")throw Error(`The Parser constructor no longer accepts a token vector as the first argument.
+ See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_4-0-0
+ For Further details.`)}if((0,Rr.isArray)(e))this.tokensMap=(0,Rr.reduce)(e,function(o,a){return o[a.name]=a,o},{});else if((0,Rr.has)(e,"modes")&&(0,Rr.every)((0,Rr.flatten)((0,Rr.values)(e.modes)),Gp.isTokenType)){var i=(0,Rr.flatten)((0,Rr.values)(e.modes)),n=(0,Rr.uniq)(i);this.tokensMap=(0,Rr.reduce)(n,function(o,a){return o[a.name]=a,o},{})}else if((0,Rr.isObject)(e))this.tokensMap=(0,Rr.cloneObj)(e);else throw new Error("